//===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARMISelLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
          "Number of constants with their storage promoted into constant pools");

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));

cl::opt<unsigned>
MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
  cl::desc("Maximum interleave factor for MVE VLDn to generate."),
  cl::init(2));

// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

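// Configure the legalization actions for one NEON vector type. Loads/stores
// and the bitwise operations are promoted to a single canonical type of the
// same register width (PromotedLdStVT / PromotedBitwiseVT) so that the
// corresponding instruction patterns only need to exist for that one type.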
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType(ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType(ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType(ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR, VT, Promote);
    AddPromotedToType(ISD::OR, VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType(ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);
  setOperationAction(ISD::SDIVREM, VT, Expand);
  setOperationAction(ISD::UDIVREM, VT, Expand);

  if (!VT.isFloatingPoint() &&
      VT != MVT::v2i64 && VT != MVT::v1i64)
    for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
  if (!VT.isFloatingPoint())
    for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT})
      setOperationAction(Opcode, VT, Legal);
}

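// NEON D registers are 64 bits wide and Q registers are 128 bits wide; the
// two helpers below register a vector type in the matching register class
// and then apply the shared setup from addTypeForNEON.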
void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

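// Mark every standard ISD operation on VT as Expand, then re-legalize the
// few operations (bitcast, load, store, undef) that remain trivially
// supported even when the type has no native arithmetic.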
void ARMTargetLowering::setAllExpand(MVT VT) {
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, VT, Expand);

  // We support these really simple operations even on types where all
  // the actual arithmetic has to be broken down into simpler
  // operations or turned into library calls.
  setOperationAction(ISD::BITCAST, VT, Legal);
  setOperationAction(ISD::LOAD, VT, Legal);
  setOperationAction(ISD::STORE, VT, Legal);
  setOperationAction(ISD::UNDEF, VT, Legal);
}

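// Apply one action to all three extending-load flavours (any-, zero- and
// sign-extend) for a given pair of types.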
void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
  setLoadExtAction(ISD::EXTLOAD, From, To, Action);
  setLoadExtAction(ISD::ZEXTLOAD, From, To, Action);
  setLoadExtAction(ISD::SEXTLOAD, From, To, Action);
}

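// Register the MVE (M-profile Vector Extension) 128-bit vector types and
// their legalization actions. Floating-point vector operations are only
// marked legal when the MVE floating-point extension (HasMVEFP) is present;
// otherwise the FP vector types are still registered, but almost everything
// on them is expanded.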
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };

  for (auto VT : IntTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
    setOperationAction(ISD::SMIN, VT, Legal);
    setOperationAction(ISD::SMAX, VT, Legal);
    setOperationAction(ISD::UMIN, VT, Legal);
    setOperationAction(ISD::UMAX, VT, Legal);
    setOperationAction(ISD::ABS, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::CTLZ, VT, Legal);
    setOperationAction(ISD::CTTZ, VT, Custom);
    setOperationAction(ISD::BITREVERSE, VT, Legal);
    setOperationAction(ISD::BSWAP, VT, Legal);
    setOperationAction(ISD::SADDSAT, VT, Legal);
    setOperationAction(ISD::UADDSAT, VT, Legal);
    setOperationAction(ISD::SSUBSAT, VT, Legal);
    setOperationAction(ISD::USUBSAT, VT, Legal);

    // No native support for these.
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);

    // Vector reductions
    setOperationAction(ISD::VECREDUCE_ADD, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_MUL, VT, Custom);
    setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
    setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
    setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

    if (!HasMVEFP) {
      setOperationAction(ISD::SINT_TO_FP, VT, Expand);
      setOperationAction(ISD::UINT_TO_FP, VT, Expand);
      setOperationAction(ISD::FP_TO_SINT, VT, Expand);
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    if (!HasMVEFP)
      setAllExpand(VT);

    // These are legal or custom whether we have MVE.fp or not
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(),
                       Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }

    if (HasMVEFP) {
      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);
      setOperationAction(ISD::FROUND, VT, Legal);
      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMUL, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

      // No native support for these.
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
    }
  }

  // Custom-expand vector reductions that are smaller than a legal vector, to
  // prevent false zero items (introduced by widening) from being added into
  // the reduction.
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v2f16, Custom);

  // We 'support' these types up to bitcast/load/store level, regardless of
  // MVE integer-only / float support. Only FP data processing on the FP
  // vector types is inhibited at the integer-only level.
  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setAllExpand(VT);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  }
  // We can do bitwise operations on v2i64 vectors
  setOperationAction(ISD::AND, MVT::v2i64, Legal);
  setOperationAction(ISD::OR, MVT::v2i64, Legal);
  setOperationAction(ISD::XOR, MVT::v2i64, Legal);

  // It is legal to extload from v8i8 to v8i16, and from v4i8 or v4i16 to
  // v4i32.
  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  // It is legal to sign extend from v4i8/v4i16 to v4i32 or v8i8 to v8i16.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Legal);

  // Some truncating stores are legal too.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
  setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);

  // Pre and Post inc on these are legal, given the correct extends
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  // Predicate types
  const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1};
  for (auto VT : pTypes) {
    addRegisterClass(VT, &ARM::VCCRRegClass);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
    setOperationAction(ISD::TRUNCATE, VT, Custom);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
  }
}

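// The constructor establishes the bulk of the lowering policy: runtime
// library call names and calling conventions for the target ABI, register
// classes for each legal value type, and per-operation legalization actions.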
ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
      !Subtarget->isTargetWatchOS()) {
    bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
    for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
                            IsHFTarget ? CallingConv::ARM_AAPCS_VFP
                                       : CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isTargetMachO()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
        Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
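      // Each table entry maps an RTLIB opcode to the runtime routine that
      // implements it; for the comparison helpers, Cond records how the
      // routine's integer result is tested to recover the boolean result.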
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const ISD::CondCode Cond;
      } LibraryCalls[] = {
        // Single-precision floating-point arithmetic.
        { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },

        // Double-precision floating-point arithmetic.
        { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },

        // Single-precision comparisons.
        { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE },
        { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE },
        { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE },
        { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE },
        { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE },
        { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE },
        { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE },

        // Double-precision comparisons.
        { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE },
        { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE },
        { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE },
        { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE },
        { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE },
        { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE },
        { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE },

        // Floating-point to integer conversions.
        // i64 conversions are done via library routines even when generating
        // VFP instructions, so use the same ones.
        { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },

        // Conversions between floating types.
        { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID },
        { RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp", ISD::SETCC_INVALID },

        // Integer to floating-point conversions.
        // i64 conversions are done via library routines even when generating
        // VFP instructions, so use the same ones.
        // FIXME: There appears to be some naming inconsistency in ARM libgcc:
        // e.g., __floatunsidf vs. __floatunssidfvfp.
        { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
        { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);

  // RTLIB
  if (Subtarget->isAAPCS_ABI() &&
      (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
       Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      // Double-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 2
      { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Double-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 3
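      // The helpers return a nonzero value when the tested relation holds,
      // so most results are consumed with SETNE; UNE reuses __aeabi_dcmpeq
      // and inverts the test with SETEQ instead.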
      { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Single-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 4
      { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Single-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 5
      { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Floating-point to integer conversions.
      // RTABI chapter 4.1.2, Table 6
      { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Conversions between floating types.
      // RTABI chapter 4.1.2, Table 7
      { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer to floating-point conversions.
      // RTABI chapter 4.1.2, Table 8
      { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Long long helper functions
      // RTABI chapter 4.2, Table 9
      { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer division functions
      // RTABI chapter 4.3.1
      { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I8, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
      if (LC.Cond != ISD::SETCC_INVALID)
        setCmpLibcallCC(LC.Op, LC.Cond);
    }

    // EABI dependent RTLIB
    if (TM.Options.EABIVersion == EABI::EABI4 ||
        TM.Options.EABIVersion == EABI::EABI5) {
      static const struct {
        const RTLIB::Libcall Op;
        const char *const Name;
        const CallingConv::ID CC;
        const ISD::CondCode Cond;
      } MemOpsLibraryCalls[] = {
        // Memory operations
        // RTABI chapter 4.3.4
        { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      };

      for (const auto &LC : MemOpsLibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  if (Subtarget->isTargetWindows()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->isTargetMachO() &&
      !(Subtarget->isTargetIOS() &&
        Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  // The half <-> float conversion functions are always soft-float on
  // non-watchos platforms, but are needed for some targets which use a
  // hard-float calling convention by default.
  if (!Subtarget->isTargetWatchABI()) {
    if (Subtarget->isAAPCS_ABI()) {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
    } else {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
    }
  }

  // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
  // a __gnu_ prefix (which is the default).
  if (Subtarget->isTargetAEABI()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);

  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
      Subtarget->hasFPRegs()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    addRegisterClass(MVT::f64, &ARM::DPRRegClass);
    if (!Subtarget->hasVFP2Base())
      setAllExpand(MVT::f32);
    if (!Subtarget->hasFP64())
      setAllExpand(MVT::f64);
  }

  if (Subtarget->hasFullFP16()) {
    addRegisterClass(MVT::f16, &ARM::HPRRegClass);
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
  }

  if (Subtarget->hasBF16()) {
    addRegisterClass(MVT::bf16, &ARM::HPRRegClass);
    setAllExpand(MVT::bf16);
    if (!Subtarget->hasFullFP16())
      setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
  }

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      addAllExtLoads(VT, InnerVT, Expand);
    }

    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
  setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);

  if (Subtarget->hasMVEIntegerOps())
    addMVEVectorTypes(Subtarget->hasMVEFloatOps());

  // Combine low-overhead loop intrinsics so that we can lower i1 types.
  if (Subtarget->hasLOB()) {
    setTargetDAGCombine(ISD::BRCOND);
    setTargetDAGCombine(ISD::BR_CC);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    if (Subtarget->hasFullFP16()) {
      addQRTypeForNEON(MVT::v8f16);
      addDRTypeForNEON(MVT::v4f16);
    }

    if (Subtarget->hasBF16()) {
      addQRTypeForNEON(MVT::v8bf16);
      addDRTypeForNEON(MVT::v4bf16);
    }
  }

  if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {
    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // none of Neon, MVE or VFP supports any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a way to exercise the case where "copysign"
    // appears in a DAG with vector operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);
  }

  if (Subtarget->hasNEON()) {
    // Do the same for v4f32, but keep in mind that vadd, vsub and vmul are
    // natively supported for it.
    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Mark v2f32 intrinsics.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    // Neon does not have a single-instruction SINT_TO_FP or UINT_TO_FP with
    // a destination type that is wider than the source, nor does it have a
    // FP_TO_[SU]INT instruction with a narrower destination than its source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);

    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    // NEON does not have single instruction CTPOP for vectors with element
    // types wider than 8-bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
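    // For example, a v4i32 CTPOP can be lowered as a v16i8 vcnt followed by
    // pairwise widening accumulations (vpaddl.u8, then vpaddl.u16) that sum
    // the per-byte counts into each 32-bit lane.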
    setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v1i64, Custom);
    setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);

    setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);

    // NEON does not have single instruction CTTZ for vectors.
    setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);

    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4Base()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::LOAD);

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
                   MVT::v2i32}) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
      }
    }
  }

  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::VECREDUCE_ADD);
    setTargetDAGCombine(ISD::ADD);
    setTargetDAGCombine(ISD::BITCAST);
  }
  if (Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine(ISD::SMIN);
    setTargetDAGCombine(ISD::UMIN);
    setTargetDAGCombine(ISD::SMAX);
    setTargetDAGCombine(ISD::UMAX);
    setTargetDAGCombine(ISD::FP_EXTEND);
    setTargetDAGCombine(ISD::SELECT);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  if (!Subtarget->hasFP64()) {
    // When targeting a floating-point unit with only single-precision
    // operations, f64 is legal for the few double-precision instructions
    // which are present. However, no double-precision operations other than
    // moves, loads and stores are provided by the hardware.
    setOperationAction(ISD::FADD, MVT::f64, Expand);
    setOperationAction(ISD::FSUB, MVT::f64, Expand);
    setOperationAction(ISD::FMUL, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FDIV, MVT::f64, Expand);
    setOperationAction(ISD::FREM, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FGETSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FNEG, MVT::f64, Expand);
    setOperationAction(ISD::FABS, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FPOW, MVT::f64, Expand);
    setOperationAction(ISD::FLOG, MVT::f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::f64, Expand);
    setOperationAction(ISD::FEXP, MVT::f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
    setOperationAction(ISD::FRINT, MVT::f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
  }

  if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) {
    setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
    if (Subtarget->hasFullFP16()) {
      setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
    }
  }

  if (!Subtarget->hasFP16()) {
    setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // ARM does not have floating-point extending loads.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // ... or truncating stores
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  // ARM does not have an i1 sign-extending load.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load/store.
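  // (Pre-indexed, e.g. "ldr r0, [r1, #4]!", updates the base register before
  // the access; post-indexed, e.g. "ldr r0, [r1], #4", updates it after.)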
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  } else {
    // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
    setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
    setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
  }

  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
  if (Subtarget->hasDSP()) {
    setOperationAction(ISD::SADDSAT, MVT::i8, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::i8, Custom);
    setOperationAction(ISD::SADDSAT, MVT::i16, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::i16, Custom);
  }
  if (Subtarget->hasBaseDSP()) {
    setOperationAction(ISD::SADDSAT, MVT::i32, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::i32, Legal);
  }

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
  setOperationAction(ISD::LOAD, MVT::i64, Custom);
  setOperationAction(ISD::STORE, MVT::i64, Custom);

// MVE lowers 64-bit shifts to lsll and lsrl, assuming that ISD::SRL and
// ISD::SRA of i64 are already marked Custom above.
| 1135 | if (Subtarget->hasMVEIntegerOps()) |
| 1136 | setOperationAction(ISD::SHL, MVT::i64, Custom); |
| 1137 | |
| 1138 | // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1. |
| 1139 | if (Subtarget->isThumb1Only()) { |
| 1140 | setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); |
| 1141 | setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); |
| 1142 | setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); |
| 1143 | } |
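// Roughly, an expanded i64 shift on Thumb1 then becomes an AEABI call such
// as
//   long long __aeabi_llsl(long long value, int shift);
// with the 64-bit value in r0/r1 and the shift amount in r2 under the
// AAPCS.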
| 1144 | |
| 1145 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) |
| 1146 | setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); |
| 1147 | |
| 1148 | // ARM does not have ROTL. |
| 1149 | setOperationAction(ISD::ROTL, MVT::i32, Expand); |
| 1150 | for (MVT VT : MVT::fixedlen_vector_valuetypes()) { |
| 1151 | setOperationAction(ISD::ROTL, VT, Expand); |
| 1152 | setOperationAction(ISD::ROTR, VT, Expand); |
| 1153 | } |
| 1154 | setOperationAction(ISD::CTTZ, MVT::i32, Custom); |
| 1155 | setOperationAction(ISD::CTPOP, MVT::i32, Expand); |
| 1156 | if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) { |
| 1157 | setOperationAction(ISD::CTLZ, MVT::i32, Expand); |
| 1158 | setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, LibCall); |
| 1159 | } |
| 1160 | |
| 1161 | // @llvm.readcyclecounter requires the Performance Monitors extension. |
| 1162 | // Default to the 0 expansion on unsupported platforms. |
| 1163 | // FIXME: Technically there are older ARM CPUs that have |
| 1164 | // implementation-specific ways of obtaining this information. |
| 1165 | if (Subtarget->hasPerfMon()) |
| 1166 | setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom); |
| 1167 | |
| 1168 | // Only ARMv6 has BSWAP. |
| 1169 | if (!Subtarget->hasV6Ops()) |
| 1170 | setOperationAction(ISD::BSWAP, MVT::i32, Expand); |
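// On v6 and later, BSWAP of i32 selects to a single "rev" instruction;
// the pre-v6 expansion falls back to a shift-and-mask sequence.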
| 1171 | |
| 1172 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() |
| 1173 | : Subtarget->hasDivideInARMMode(); |
| 1174 | if (!hasDivide) { |
// These are expanded into libcalls if the CPU doesn't have a HW divider.
| 1176 | setOperationAction(ISD::SDIV, MVT::i32, LibCall); |
| 1177 | setOperationAction(ISD::UDIV, MVT::i32, LibCall); |
| 1178 | } |
| 1179 | |
| 1180 | if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) { |
| 1181 | setOperationAction(ISD::SDIV, MVT::i32, Custom); |
| 1182 | setOperationAction(ISD::UDIV, MVT::i32, Custom); |
| 1183 | |
| 1184 | setOperationAction(ISD::SDIV, MVT::i64, Custom); |
| 1185 | setOperationAction(ISD::UDIV, MVT::i64, Custom); |
| 1186 | } |
| 1187 | |
| 1188 | setOperationAction(ISD::SREM, MVT::i32, Expand); |
| 1189 | setOperationAction(ISD::UREM, MVT::i32, Expand); |
| 1190 | |
| 1191 | // Register based DivRem for AEABI (RTABI 4.2) |
| 1192 | if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || |
| 1193 | Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || |
| 1194 | Subtarget->isTargetWindows()) { |
| 1195 | setOperationAction(ISD::SREM, MVT::i64, Custom); |
| 1196 | setOperationAction(ISD::UREM, MVT::i64, Custom); |
| 1197 | HasStandaloneRem = false; |
| 1198 | |
| 1199 | if (Subtarget->isTargetWindows()) { |
| 1200 | const struct { |
| 1201 | const RTLIB::Libcall Op; |
| 1202 | const char * const Name; |
| 1203 | const CallingConv::ID CC; |
| 1204 | } LibraryCalls[] = { |
| 1205 | { RTLIB::SDIVREM_I8, "__rt_sdiv" , CallingConv::ARM_AAPCS }, |
| 1206 | { RTLIB::SDIVREM_I16, "__rt_sdiv" , CallingConv::ARM_AAPCS }, |
| 1207 | { RTLIB::SDIVREM_I32, "__rt_sdiv" , CallingConv::ARM_AAPCS }, |
| 1208 | { RTLIB::SDIVREM_I64, "__rt_sdiv64" , CallingConv::ARM_AAPCS }, |
| 1209 | |
| 1210 | { RTLIB::UDIVREM_I8, "__rt_udiv" , CallingConv::ARM_AAPCS }, |
| 1211 | { RTLIB::UDIVREM_I16, "__rt_udiv" , CallingConv::ARM_AAPCS }, |
| 1212 | { RTLIB::UDIVREM_I32, "__rt_udiv" , CallingConv::ARM_AAPCS }, |
| 1213 | { RTLIB::UDIVREM_I64, "__rt_udiv64" , CallingConv::ARM_AAPCS }, |
| 1214 | }; |
| 1215 | |
| 1216 | for (const auto &LC : LibraryCalls) { |
| 1217 | setLibcallName(LC.Op, LC.Name); |
| 1218 | setLibcallCallingConv(LC.Op, LC.CC); |
| 1219 | } |
| 1220 | } else { |
| 1221 | const struct { |
| 1222 | const RTLIB::Libcall Op; |
| 1223 | const char * const Name; |
| 1224 | const CallingConv::ID CC; |
| 1225 | } LibraryCalls[] = { |
| 1226 | { RTLIB::SDIVREM_I8, "__aeabi_idivmod" , CallingConv::ARM_AAPCS }, |
| 1227 | { RTLIB::SDIVREM_I16, "__aeabi_idivmod" , CallingConv::ARM_AAPCS }, |
| 1228 | { RTLIB::SDIVREM_I32, "__aeabi_idivmod" , CallingConv::ARM_AAPCS }, |
| 1229 | { RTLIB::SDIVREM_I64, "__aeabi_ldivmod" , CallingConv::ARM_AAPCS }, |
| 1230 | |
| 1231 | { RTLIB::UDIVREM_I8, "__aeabi_uidivmod" , CallingConv::ARM_AAPCS }, |
| 1232 | { RTLIB::UDIVREM_I16, "__aeabi_uidivmod" , CallingConv::ARM_AAPCS }, |
| 1233 | { RTLIB::UDIVREM_I32, "__aeabi_uidivmod" , CallingConv::ARM_AAPCS }, |
| 1234 | { RTLIB::UDIVREM_I64, "__aeabi_uldivmod" , CallingConv::ARM_AAPCS }, |
| 1235 | }; |
| 1236 | |
| 1237 | for (const auto &LC : LibraryCalls) { |
| 1238 | setLibcallName(LC.Op, LC.Name); |
| 1239 | setLibcallCallingConv(LC.Op, LC.CC); |
| 1240 | } |
| 1241 | } |
| 1242 | |
| 1243 | setOperationAction(ISD::SDIVREM, MVT::i32, Custom); |
| 1244 | setOperationAction(ISD::UDIVREM, MVT::i32, Custom); |
| 1245 | setOperationAction(ISD::SDIVREM, MVT::i64, Custom); |
| 1246 | setOperationAction(ISD::UDIVREM, MVT::i64, Custom); |
| 1247 | } else { |
| 1248 | setOperationAction(ISD::SDIVREM, MVT::i32, Expand); |
| 1249 | setOperationAction(ISD::UDIVREM, MVT::i32, Expand); |
| 1250 | } |
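// For reference, the divmod libcalls return both results in registers
// (e.g. __aeabi_idivmod: quotient in r0, remainder in r1), which is why
// SDIVREM/UDIVREM can be lowered to one register-based call instead of
// two separate libcalls.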
| 1251 | |
| 1252 | if (Subtarget->getTargetTriple().isOSMSVCRT()) { |
| 1253 | // MSVCRT doesn't have powi; fall back to pow |
| 1254 | setLibcallName(RTLIB::POWI_F32, nullptr); |
| 1255 | setLibcallName(RTLIB::POWI_F64, nullptr); |
| 1256 | } |
| 1257 | |
| 1258 | setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); |
| 1259 | setOperationAction(ISD::ConstantPool, MVT::i32, Custom); |
| 1260 | setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); |
| 1261 | setOperationAction(ISD::BlockAddress, MVT::i32, Custom); |
| 1262 | |
| 1263 | setOperationAction(ISD::TRAP, MVT::Other, Legal); |
| 1264 | setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); |
| 1265 | |
| 1266 | // Use the default implementation. |
| 1267 | setOperationAction(ISD::VASTART, MVT::Other, Custom); |
| 1268 | setOperationAction(ISD::VAARG, MVT::Other, Expand); |
| 1269 | setOperationAction(ISD::VACOPY, MVT::Other, Expand); |
| 1270 | setOperationAction(ISD::VAEND, MVT::Other, Expand); |
| 1271 | setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); |
| 1272 | setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); |
| 1273 | |
| 1274 | if (Subtarget->isTargetWindows()) |
| 1275 | setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); |
| 1276 | else |
| 1277 | setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); |
| 1278 | |
| 1279 | // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use |
| 1280 | // the default expansion. |
| 1281 | InsertFencesForAtomic = false; |
| 1282 | if (Subtarget->hasAnyDataBarrier() && |
| 1283 | (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) { |
| 1284 | // ATOMIC_FENCE needs custom lowering; the others should have been expanded |
| 1285 | // to ldrex/strex loops already. |
| 1286 | setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); |
| 1287 | if (!Subtarget->isThumb() || !Subtarget->isMClass()) |
| 1288 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); |
| 1289 | |
| 1290 | // On v8, we have particularly efficient implementations of atomic fences |
| 1291 | // if they can be combined with nearby atomic loads and stores. |
| 1292 | if (!Subtarget->hasAcquireRelease() || |
| 1293 | getTargetMachine().getOptLevel() == 0) { |
| 1294 | // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc. |
| 1295 | InsertFencesForAtomic = true; |
| 1296 | } |
| 1297 | } else { |
| 1298 | // If there's anything we can use as a barrier, go through custom lowering |
| 1299 | // for ATOMIC_FENCE. |
// If the target has DMB in Thumb mode, fences can be inserted.
| 1301 | if (Subtarget->hasDataBarrier()) |
| 1302 | InsertFencesForAtomic = true; |
| 1303 | |
| 1304 | setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, |
| 1305 | Subtarget->hasAnyDataBarrier() ? Custom : Expand); |
| 1306 | |
| 1307 | // Set them all for expansion, which will force libcalls. |
| 1308 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); |
| 1309 | setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); |
| 1310 | setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand); |
| 1311 | setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand); |
| 1312 | setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand); |
| 1313 | setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand); |
| 1314 | setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand); |
| 1315 | setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand); |
| 1316 | setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand); |
| 1317 | setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand); |
| 1318 | setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand); |
| 1319 | setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand); |
| 1320 | // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the |
| 1321 | // Unordered/Monotonic case. |
| 1322 | if (!InsertFencesForAtomic) { |
| 1323 | setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); |
| 1324 | setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); |
| 1325 | } |
| 1326 | } |
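// When InsertFencesForAtomic is set, AtomicExpand brackets atomic accesses
// with fences that the ATOMIC_FENCE lowering turns into barriers; e.g. a
// seq_cst store roughly becomes: dmb ish; str; dmb ish.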
| 1327 | |
| 1328 | setOperationAction(ISD::PREFETCH, MVT::Other, Custom); |
| 1329 | |
| 1330 | // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. |
| 1331 | if (!Subtarget->hasV6Ops()) { |
| 1332 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); |
| 1333 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); |
| 1334 | } |
| 1335 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); |
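// With SXTB/SXTH available, a sign_extend_inreg of i8/i16 selects to a
// single instruction (e.g. "sxtb r0, r0"); the pre-v6 expansion above
// instead uses a shift-left/arithmetic-shift-right pair.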
| 1336 | |
| 1337 | if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() && |
| 1338 | !Subtarget->isThumb1Only()) { |
// Turn f64 -> i64 into VMOVRRD and i64 -> f64 into VMOVDRR
// iff the target supports VFP2.
| 1341 | setOperationAction(ISD::BITCAST, MVT::i64, Custom); |
| 1342 | setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); |
| 1343 | } |
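// Illustratively, with VFP2 a "bitcast f64 to i64" then becomes a single
//   vmov r0, r1, d0
// rather than a store and reload through the stack.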
| 1344 | |
| 1345 | // We want to custom lower some of our intrinsics. |
| 1346 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); |
| 1347 | setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); |
| 1348 | setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); |
| 1349 | setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom); |
| 1350 | if (Subtarget->useSjLjEH()) |
| 1351 | setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume" ); |
| 1352 | |
| 1353 | setOperationAction(ISD::SETCC, MVT::i32, Expand); |
| 1354 | setOperationAction(ISD::SETCC, MVT::f32, Expand); |
| 1355 | setOperationAction(ISD::SETCC, MVT::f64, Expand); |
| 1356 | setOperationAction(ISD::SELECT, MVT::i32, Custom); |
| 1357 | setOperationAction(ISD::SELECT, MVT::f32, Custom); |
| 1358 | setOperationAction(ISD::SELECT, MVT::f64, Custom); |
| 1359 | setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); |
| 1360 | setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); |
| 1361 | setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); |
| 1362 | if (Subtarget->hasFullFP16()) { |
| 1363 | setOperationAction(ISD::SETCC, MVT::f16, Expand); |
| 1364 | setOperationAction(ISD::SELECT, MVT::f16, Custom); |
| 1365 | setOperationAction(ISD::SELECT_CC, MVT::f16, Custom); |
| 1366 | } |
| 1367 | |
| 1368 | setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom); |
| 1369 | |
| 1370 | setOperationAction(ISD::BRCOND, MVT::Other, Custom); |
| 1371 | setOperationAction(ISD::BR_CC, MVT::i32, Custom); |
| 1372 | if (Subtarget->hasFullFP16()) |
| 1373 | setOperationAction(ISD::BR_CC, MVT::f16, Custom); |
| 1374 | setOperationAction(ISD::BR_CC, MVT::f32, Custom); |
| 1375 | setOperationAction(ISD::BR_CC, MVT::f64, Custom); |
| 1376 | setOperationAction(ISD::BR_JT, MVT::Other, Custom); |
| 1377 | |
| 1378 | // We don't support sin/cos/fmod/copysign/pow |
| 1379 | setOperationAction(ISD::FSIN, MVT::f64, Expand); |
| 1380 | setOperationAction(ISD::FSIN, MVT::f32, Expand); |
| 1381 | setOperationAction(ISD::FCOS, MVT::f32, Expand); |
| 1382 | setOperationAction(ISD::FCOS, MVT::f64, Expand); |
| 1383 | setOperationAction(ISD::FSINCOS, MVT::f64, Expand); |
| 1384 | setOperationAction(ISD::FSINCOS, MVT::f32, Expand); |
| 1385 | setOperationAction(ISD::FREM, MVT::f64, Expand); |
| 1386 | setOperationAction(ISD::FREM, MVT::f32, Expand); |
| 1387 | if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() && |
| 1388 | !Subtarget->isThumb1Only()) { |
| 1389 | setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); |
| 1390 | setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); |
| 1391 | } |
| 1392 | setOperationAction(ISD::FPOW, MVT::f64, Expand); |
| 1393 | setOperationAction(ISD::FPOW, MVT::f32, Expand); |
| 1394 | |
| 1395 | if (!Subtarget->hasVFP4Base()) { |
| 1396 | setOperationAction(ISD::FMA, MVT::f64, Expand); |
| 1397 | setOperationAction(ISD::FMA, MVT::f32, Expand); |
| 1398 | } |
| 1399 | |
| 1400 | // Various VFP goodness |
| 1401 | if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { |
| 1402 | // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. |
| 1403 | if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) { |
| 1404 | setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); |
| 1405 | setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); |
| 1406 | } |
| 1407 | |
| 1408 | // fp16 is a special v7 extension that adds f16 <-> f32 conversions. |
| 1409 | if (!Subtarget->hasFP16()) { |
| 1410 | setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); |
| 1411 | setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); |
| 1412 | } |
| 1413 | |
| 1414 | // Strict floating-point comparisons need custom lowering. |
| 1415 | setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom); |
| 1416 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom); |
| 1417 | setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom); |
| 1418 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom); |
| 1419 | setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom); |
| 1420 | setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom); |
| 1421 | } |
| 1422 | |
| 1423 | // Use __sincos_stret if available. |
| 1424 | if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr && |
| 1425 | getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) { |
| 1426 | setOperationAction(ISD::FSINCOS, MVT::f64, Custom); |
| 1427 | setOperationAction(ISD::FSINCOS, MVT::f32, Custom); |
| 1428 | } |
| 1429 | |
| 1430 | // FP-ARMv8 implements a lot of rounding-like FP operations. |
| 1431 | if (Subtarget->hasFPARMv8Base()) { |
| 1432 | setOperationAction(ISD::FFLOOR, MVT::f32, Legal); |
| 1433 | setOperationAction(ISD::FCEIL, MVT::f32, Legal); |
| 1434 | setOperationAction(ISD::FROUND, MVT::f32, Legal); |
| 1435 | setOperationAction(ISD::FTRUNC, MVT::f32, Legal); |
| 1436 | setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); |
| 1437 | setOperationAction(ISD::FRINT, MVT::f32, Legal); |
| 1438 | setOperationAction(ISD::FMINNUM, MVT::f32, Legal); |
| 1439 | setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); |
| 1440 | if (Subtarget->hasNEON()) { |
| 1441 | setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal); |
| 1442 | setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal); |
| 1443 | setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); |
| 1444 | setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); |
| 1445 | } |
| 1446 | |
| 1447 | if (Subtarget->hasFP64()) { |
| 1448 | setOperationAction(ISD::FFLOOR, MVT::f64, Legal); |
| 1449 | setOperationAction(ISD::FCEIL, MVT::f64, Legal); |
| 1450 | setOperationAction(ISD::FROUND, MVT::f64, Legal); |
| 1451 | setOperationAction(ISD::FTRUNC, MVT::f64, Legal); |
| 1452 | setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); |
| 1453 | setOperationAction(ISD::FRINT, MVT::f64, Legal); |
| 1454 | setOperationAction(ISD::FMINNUM, MVT::f64, Legal); |
| 1455 | setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); |
| 1456 | } |
| 1457 | } |
| 1458 | |
// FP16 operations often need to be promoted to f32 in order to call
// library functions.
| 1460 | if (Subtarget->hasFullFP16()) { |
| 1461 | setOperationAction(ISD::FREM, MVT::f16, Promote); |
| 1462 | setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand); |
| 1463 | setOperationAction(ISD::FSIN, MVT::f16, Promote); |
| 1464 | setOperationAction(ISD::FCOS, MVT::f16, Promote); |
| 1465 | setOperationAction(ISD::FSINCOS, MVT::f16, Promote); |
| 1466 | setOperationAction(ISD::FPOWI, MVT::f16, Promote); |
| 1467 | setOperationAction(ISD::FPOW, MVT::f16, Promote); |
| 1468 | setOperationAction(ISD::FEXP, MVT::f16, Promote); |
| 1469 | setOperationAction(ISD::FEXP2, MVT::f16, Promote); |
| 1470 | setOperationAction(ISD::FLOG, MVT::f16, Promote); |
| 1471 | setOperationAction(ISD::FLOG10, MVT::f16, Promote); |
| 1472 | setOperationAction(ISD::FLOG2, MVT::f16, Promote); |
| 1473 | |
| 1474 | setOperationAction(ISD::FROUND, MVT::f16, Legal); |
| 1475 | } |
| 1476 | |
| 1477 | if (Subtarget->hasNEON()) { |
| 1478 | // vmin and vmax aren't available in a scalar form, so we can use |
| 1479 | // a NEON instruction with an undef lane instead. This has a performance |
| 1480 | // penalty on some cores, so we don't do this unless we have been |
| 1481 | // asked to by the core tuning model. |
| 1482 | if (Subtarget->useNEONForSinglePrecisionFP()) { |
| 1483 | setOperationAction(ISD::FMINIMUM, MVT::f32, Legal); |
| 1484 | setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal); |
| 1485 | setOperationAction(ISD::FMINIMUM, MVT::f16, Legal); |
| 1486 | setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal); |
| 1487 | } |
| 1488 | setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal); |
| 1489 | setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal); |
| 1490 | setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal); |
| 1491 | setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal); |
| 1492 | |
| 1493 | if (Subtarget->hasFullFP16()) { |
| 1494 | setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal); |
| 1495 | setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal); |
| 1496 | setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal); |
| 1497 | setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal); |
| 1498 | |
| 1499 | setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal); |
| 1500 | setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal); |
| 1501 | setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal); |
| 1502 | setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal); |
| 1503 | } |
| 1504 | } |
| 1505 | |
// We have target-specific dag combine patterns for the following nodes:
// ARMISD::VMOVRRD - no need to call setTargetDAGCombine for this one, as
// target-specific nodes are always passed to PerformDAGCombine anyway.
| 1508 | setTargetDAGCombine(ISD::ADD); |
| 1509 | setTargetDAGCombine(ISD::SUB); |
| 1510 | setTargetDAGCombine(ISD::MUL); |
| 1511 | setTargetDAGCombine(ISD::AND); |
| 1512 | setTargetDAGCombine(ISD::OR); |
| 1513 | setTargetDAGCombine(ISD::XOR); |
| 1514 | |
| 1515 | if (Subtarget->hasMVEIntegerOps()) |
| 1516 | setTargetDAGCombine(ISD::VSELECT); |
| 1517 | |
| 1518 | if (Subtarget->hasV6Ops()) |
| 1519 | setTargetDAGCombine(ISD::SRL); |
| 1520 | if (Subtarget->isThumb1Only()) |
| 1521 | setTargetDAGCombine(ISD::SHL); |
| 1522 | |
| 1523 | setStackPointerRegisterToSaveRestore(ARM::SP); |
| 1524 | |
| 1525 | if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() || |
| 1526 | !Subtarget->hasVFP2Base() || Subtarget->hasMinSize()) |
| 1527 | setSchedulingPreference(Sched::RegPressure); |
| 1528 | else |
| 1529 | setSchedulingPreference(Sched::Hybrid); |
| 1530 | |
| 1531 | //// temporary - rewrite interface to use type |
| 1532 | MaxStoresPerMemset = 8; |
| 1533 | MaxStoresPerMemsetOptSize = 4; |
| 1534 | MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores |
| 1535 | MaxStoresPerMemcpyOptSize = 2; |
| 1536 | MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores |
| 1537 | MaxStoresPerMemmoveOptSize = 2; |
| 1538 | |
| 1539 | // On ARM arguments smaller than 4 bytes are extended, so all arguments |
| 1540 | // are at least 4 bytes aligned. |
| 1541 | setMinStackArgumentAlignment(Align(4)); |
| 1542 | |
| 1543 | // Prefer likely predicted branches to selects on out-of-order cores. |
| 1544 | PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder(); |
| 1545 | |
| 1546 | setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment())); |
| 1547 | |
| 1548 | setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4)); |
| 1549 | |
| 1550 | if (Subtarget->isThumb() || Subtarget->isThumb2()) |
| 1551 | setTargetDAGCombine(ISD::ABS); |
| 1552 | } |
| 1553 | |
| 1554 | bool ARMTargetLowering::useSoftFloat() const { |
| 1555 | return Subtarget->useSoftFloat(); |
| 1556 | } |
| 1557 | |
| 1558 | // FIXME: It might make sense to define the representative register class as the |
| 1559 | // nearest super-register that has a non-null superset. For example, DPR_VFP2 is |
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
| 1561 | // SPR's representative would be DPR_VFP2. This should work well if register |
| 1562 | // pressure tracking were modified such that a register use would increment the |
// pressure of the register class's representative and all of its super
| 1564 | // classes' representatives transitively. We have not implemented this because |
| 1565 | // of the difficulty prior to coalescing of modeling operand register classes |
| 1566 | // due to the common occurrence of cross class copies and subregister insertions |
| 1567 | // and extractions. |
| 1568 | std::pair<const TargetRegisterClass *, uint8_t> |
| 1569 | ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, |
| 1570 | MVT VT) const { |
| 1571 | const TargetRegisterClass *RRC = nullptr; |
| 1572 | uint8_t Cost = 1; |
| 1573 | switch (VT.SimpleTy) { |
| 1574 | default: |
| 1575 | return TargetLowering::findRepresentativeClass(TRI, VT); |
// Use DPR as the representative register class for all floating point
// and vector types. Since there are 32 SPR registers and 32 DPR registers,
// the cost is 1 for both f32 and f64.
| 1579 | case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: |
| 1580 | case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: |
| 1581 | RRC = &ARM::DPRRegClass; |
| 1582 | // When NEON is used for SP, only half of the register file is available |
| 1583 | // because operations that define both SP and DP results will be constrained |
| 1584 | // to the VFP2 class (D0-D15). We currently model this constraint prior to |
| 1585 | // coalescing by double-counting the SP regs. See the FIXME above. |
| 1586 | if (Subtarget->useNEONForSinglePrecisionFP()) |
| 1587 | Cost = 2; |
| 1588 | break; |
| 1589 | case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: |
| 1590 | case MVT::v4f32: case MVT::v2f64: |
| 1591 | RRC = &ARM::DPRRegClass; |
| 1592 | Cost = 2; |
| 1593 | break; |
| 1594 | case MVT::v4i64: |
| 1595 | RRC = &ARM::DPRRegClass; |
| 1596 | Cost = 4; |
| 1597 | break; |
| 1598 | case MVT::v8i64: |
| 1599 | RRC = &ARM::DPRRegClass; |
| 1600 | Cost = 8; |
| 1601 | break; |
| 1602 | } |
| 1603 | return std::make_pair(RRC, Cost); |
| 1604 | } |
| 1605 | |
| 1606 | const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { |
| 1607 | switch ((ARMISD::NodeType)Opcode) { |
| 1608 | case ARMISD::FIRST_NUMBER: break; |
| 1609 | case ARMISD::Wrapper: return "ARMISD::Wrapper" ; |
| 1610 | case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC" ; |
| 1611 | case ARMISD::WrapperJT: return "ARMISD::WrapperJT" ; |
| 1612 | case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL" ; |
| 1613 | case ARMISD::CALL: return "ARMISD::CALL" ; |
| 1614 | case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED" ; |
| 1615 | case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK" ; |
| 1616 | case ARMISD::tSECALL: return "ARMISD::tSECALL" ; |
| 1617 | case ARMISD::BRCOND: return "ARMISD::BRCOND" ; |
| 1618 | case ARMISD::BR_JT: return "ARMISD::BR_JT" ; |
| 1619 | case ARMISD::BR2_JT: return "ARMISD::BR2_JT" ; |
| 1620 | case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG" ; |
| 1621 | case ARMISD::SERET_FLAG: return "ARMISD::SERET_FLAG" ; |
| 1622 | case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG" ; |
| 1623 | case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD" ; |
| 1624 | case ARMISD::CMP: return "ARMISD::CMP" ; |
| 1625 | case ARMISD::CMN: return "ARMISD::CMN" ; |
| 1626 | case ARMISD::CMPZ: return "ARMISD::CMPZ" ; |
| 1627 | case ARMISD::CMPFP: return "ARMISD::CMPFP" ; |
| 1628 | case ARMISD::CMPFPE: return "ARMISD::CMPFPE" ; |
| 1629 | case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0" ; |
| 1630 | case ARMISD::CMPFPEw0: return "ARMISD::CMPFPEw0" ; |
| 1631 | case ARMISD::BCC_i64: return "ARMISD::BCC_i64" ; |
| 1632 | case ARMISD::FMSTAT: return "ARMISD::FMSTAT" ; |
| 1633 | |
| 1634 | case ARMISD::CMOV: return "ARMISD::CMOV" ; |
| 1635 | case ARMISD::SUBS: return "ARMISD::SUBS" ; |
| 1636 | |
| 1637 | case ARMISD::SSAT: return "ARMISD::SSAT" ; |
| 1638 | case ARMISD::USAT: return "ARMISD::USAT" ; |
| 1639 | |
| 1640 | case ARMISD::ASRL: return "ARMISD::ASRL" ; |
| 1641 | case ARMISD::LSRL: return "ARMISD::LSRL" ; |
| 1642 | case ARMISD::LSLL: return "ARMISD::LSLL" ; |
| 1643 | |
| 1644 | case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG" ; |
| 1645 | case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG" ; |
| 1646 | case ARMISD::RRX: return "ARMISD::RRX" ; |
| 1647 | |
| 1648 | case ARMISD::ADDC: return "ARMISD::ADDC" ; |
| 1649 | case ARMISD::ADDE: return "ARMISD::ADDE" ; |
| 1650 | case ARMISD::SUBC: return "ARMISD::SUBC" ; |
| 1651 | case ARMISD::SUBE: return "ARMISD::SUBE" ; |
| 1652 | case ARMISD::LSLS: return "ARMISD::LSLS" ; |
| 1653 | |
| 1654 | case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD" ; |
| 1655 | case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR" ; |
| 1656 | case ARMISD::VMOVhr: return "ARMISD::VMOVhr" ; |
| 1657 | case ARMISD::VMOVrh: return "ARMISD::VMOVrh" ; |
| 1658 | case ARMISD::VMOVSR: return "ARMISD::VMOVSR" ; |
| 1659 | |
| 1660 | case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP" ; |
| 1661 | case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP" ; |
| 1662 | case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH" ; |
| 1663 | |
| 1664 | case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN" ; |
| 1665 | |
case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER" ;
| 1667 | |
| 1668 | case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC" ; |
| 1669 | |
| 1670 | case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR" ; |
| 1671 | |
| 1672 | case ARMISD::PRELOAD: return "ARMISD::PRELOAD" ; |
| 1673 | |
| 1674 | case ARMISD::LDRD: return "ARMISD::LDRD" ; |
| 1675 | case ARMISD::STRD: return "ARMISD::STRD" ; |
| 1676 | |
| 1677 | case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK" ; |
| 1678 | case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK" ; |
| 1679 | |
| 1680 | case ARMISD::PREDICATE_CAST: return "ARMISD::PREDICATE_CAST" ; |
| 1681 | case ARMISD::VECTOR_REG_CAST: return "ARMISD::VECTOR_REG_CAST" ; |
| 1682 | case ARMISD::VCMP: return "ARMISD::VCMP" ; |
| 1683 | case ARMISD::VCMPZ: return "ARMISD::VCMPZ" ; |
| 1684 | case ARMISD::VTST: return "ARMISD::VTST" ; |
| 1685 | |
| 1686 | case ARMISD::VSHLs: return "ARMISD::VSHLs" ; |
| 1687 | case ARMISD::VSHLu: return "ARMISD::VSHLu" ; |
| 1688 | case ARMISD::VSHLIMM: return "ARMISD::VSHLIMM" ; |
| 1689 | case ARMISD::VSHRsIMM: return "ARMISD::VSHRsIMM" ; |
| 1690 | case ARMISD::VSHRuIMM: return "ARMISD::VSHRuIMM" ; |
| 1691 | case ARMISD::VRSHRsIMM: return "ARMISD::VRSHRsIMM" ; |
| 1692 | case ARMISD::VRSHRuIMM: return "ARMISD::VRSHRuIMM" ; |
| 1693 | case ARMISD::VRSHRNIMM: return "ARMISD::VRSHRNIMM" ; |
| 1694 | case ARMISD::VQSHLsIMM: return "ARMISD::VQSHLsIMM" ; |
| 1695 | case ARMISD::VQSHLuIMM: return "ARMISD::VQSHLuIMM" ; |
| 1696 | case ARMISD::VQSHLsuIMM: return "ARMISD::VQSHLsuIMM" ; |
| 1697 | case ARMISD::VQSHRNsIMM: return "ARMISD::VQSHRNsIMM" ; |
| 1698 | case ARMISD::VQSHRNuIMM: return "ARMISD::VQSHRNuIMM" ; |
| 1699 | case ARMISD::VQSHRNsuIMM: return "ARMISD::VQSHRNsuIMM" ; |
| 1700 | case ARMISD::VQRSHRNsIMM: return "ARMISD::VQRSHRNsIMM" ; |
| 1701 | case ARMISD::VQRSHRNuIMM: return "ARMISD::VQRSHRNuIMM" ; |
| 1702 | case ARMISD::VQRSHRNsuIMM: return "ARMISD::VQRSHRNsuIMM" ; |
| 1703 | case ARMISD::VSLIIMM: return "ARMISD::VSLIIMM" ; |
| 1704 | case ARMISD::VSRIIMM: return "ARMISD::VSRIIMM" ; |
| 1705 | case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu" ; |
| 1706 | case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs" ; |
| 1707 | case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM" ; |
| 1708 | case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM" ; |
| 1709 | case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM" ; |
| 1710 | case ARMISD::VDUP: return "ARMISD::VDUP" ; |
| 1711 | case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE" ; |
| 1712 | case ARMISD::VEXT: return "ARMISD::VEXT" ; |
| 1713 | case ARMISD::VREV64: return "ARMISD::VREV64" ; |
| 1714 | case ARMISD::VREV32: return "ARMISD::VREV32" ; |
| 1715 | case ARMISD::VREV16: return "ARMISD::VREV16" ; |
| 1716 | case ARMISD::VZIP: return "ARMISD::VZIP" ; |
| 1717 | case ARMISD::VUZP: return "ARMISD::VUZP" ; |
| 1718 | case ARMISD::VTRN: return "ARMISD::VTRN" ; |
| 1719 | case ARMISD::VTBL1: return "ARMISD::VTBL1" ; |
| 1720 | case ARMISD::VTBL2: return "ARMISD::VTBL2" ; |
| 1721 | case ARMISD::VMOVN: return "ARMISD::VMOVN" ; |
| 1722 | case ARMISD::VQMOVNs: return "ARMISD::VQMOVNs" ; |
| 1723 | case ARMISD::VQMOVNu: return "ARMISD::VQMOVNu" ; |
| 1724 | case ARMISD::VCVTN: return "ARMISD::VCVTN" ; |
| 1725 | case ARMISD::VCVTL: return "ARMISD::VCVTL" ; |
| 1726 | case ARMISD::VMULLs: return "ARMISD::VMULLs" ; |
| 1727 | case ARMISD::VMULLu: return "ARMISD::VMULLu" ; |
| 1728 | case ARMISD::VQDMULH: return "ARMISD::VQDMULH" ; |
| 1729 | case ARMISD::VADDVs: return "ARMISD::VADDVs" ; |
| 1730 | case ARMISD::VADDVu: return "ARMISD::VADDVu" ; |
| 1731 | case ARMISD::VADDVps: return "ARMISD::VADDVps" ; |
| 1732 | case ARMISD::VADDVpu: return "ARMISD::VADDVpu" ; |
| 1733 | case ARMISD::VADDLVs: return "ARMISD::VADDLVs" ; |
| 1734 | case ARMISD::VADDLVu: return "ARMISD::VADDLVu" ; |
| 1735 | case ARMISD::VADDLVAs: return "ARMISD::VADDLVAs" ; |
| 1736 | case ARMISD::VADDLVAu: return "ARMISD::VADDLVAu" ; |
| 1737 | case ARMISD::VADDLVps: return "ARMISD::VADDLVps" ; |
| 1738 | case ARMISD::VADDLVpu: return "ARMISD::VADDLVpu" ; |
| 1739 | case ARMISD::VADDLVAps: return "ARMISD::VADDLVAps" ; |
| 1740 | case ARMISD::VADDLVApu: return "ARMISD::VADDLVApu" ; |
| 1741 | case ARMISD::VMLAVs: return "ARMISD::VMLAVs" ; |
| 1742 | case ARMISD::VMLAVu: return "ARMISD::VMLAVu" ; |
| 1743 | case ARMISD::VMLAVps: return "ARMISD::VMLAVps" ; |
| 1744 | case ARMISD::VMLAVpu: return "ARMISD::VMLAVpu" ; |
| 1745 | case ARMISD::VMLALVs: return "ARMISD::VMLALVs" ; |
| 1746 | case ARMISD::VMLALVu: return "ARMISD::VMLALVu" ; |
| 1747 | case ARMISD::VMLALVps: return "ARMISD::VMLALVps" ; |
| 1748 | case ARMISD::VMLALVpu: return "ARMISD::VMLALVpu" ; |
| 1749 | case ARMISD::VMLALVAs: return "ARMISD::VMLALVAs" ; |
| 1750 | case ARMISD::VMLALVAu: return "ARMISD::VMLALVAu" ; |
| 1751 | case ARMISD::VMLALVAps: return "ARMISD::VMLALVAps" ; |
| 1752 | case ARMISD::VMLALVApu: return "ARMISD::VMLALVApu" ; |
| 1753 | case ARMISD::VMINVu: return "ARMISD::VMINVu" ; |
| 1754 | case ARMISD::VMINVs: return "ARMISD::VMINVs" ; |
| 1755 | case ARMISD::VMAXVu: return "ARMISD::VMAXVu" ; |
| 1756 | case ARMISD::VMAXVs: return "ARMISD::VMAXVs" ; |
| 1757 | case ARMISD::UMAAL: return "ARMISD::UMAAL" ; |
| 1758 | case ARMISD::UMLAL: return "ARMISD::UMLAL" ; |
| 1759 | case ARMISD::SMLAL: return "ARMISD::SMLAL" ; |
| 1760 | case ARMISD::SMLALBB: return "ARMISD::SMLALBB" ; |
| 1761 | case ARMISD::SMLALBT: return "ARMISD::SMLALBT" ; |
| 1762 | case ARMISD::SMLALTB: return "ARMISD::SMLALTB" ; |
| 1763 | case ARMISD::SMLALTT: return "ARMISD::SMLALTT" ; |
| 1764 | case ARMISD::SMULWB: return "ARMISD::SMULWB" ; |
| 1765 | case ARMISD::SMULWT: return "ARMISD::SMULWT" ; |
| 1766 | case ARMISD::SMLALD: return "ARMISD::SMLALD" ; |
| 1767 | case ARMISD::SMLALDX: return "ARMISD::SMLALDX" ; |
| 1768 | case ARMISD::SMLSLD: return "ARMISD::SMLSLD" ; |
| 1769 | case ARMISD::SMLSLDX: return "ARMISD::SMLSLDX" ; |
| 1770 | case ARMISD::SMMLAR: return "ARMISD::SMMLAR" ; |
| 1771 | case ARMISD::SMMLSR: return "ARMISD::SMMLSR" ; |
| 1772 | case ARMISD::QADD16b: return "ARMISD::QADD16b" ; |
| 1773 | case ARMISD::QSUB16b: return "ARMISD::QSUB16b" ; |
| 1774 | case ARMISD::QADD8b: return "ARMISD::QADD8b" ; |
| 1775 | case ARMISD::QSUB8b: return "ARMISD::QSUB8b" ; |
| 1776 | case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR" ; |
| 1777 | case ARMISD::BFI: return "ARMISD::BFI" ; |
| 1778 | case ARMISD::VORRIMM: return "ARMISD::VORRIMM" ; |
| 1779 | case ARMISD::VBICIMM: return "ARMISD::VBICIMM" ; |
| 1780 | case ARMISD::VBSP: return "ARMISD::VBSP" ; |
| 1781 | case ARMISD::MEMCPY: return "ARMISD::MEMCPY" ; |
| 1782 | case ARMISD::VLD1DUP: return "ARMISD::VLD1DUP" ; |
| 1783 | case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP" ; |
| 1784 | case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP" ; |
| 1785 | case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP" ; |
| 1786 | case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD" ; |
| 1787 | case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD" ; |
| 1788 | case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD" ; |
| 1789 | case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD" ; |
| 1790 | case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD" ; |
| 1791 | case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD" ; |
| 1792 | case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD" ; |
| 1793 | case ARMISD::VLD1DUP_UPD: return "ARMISD::VLD1DUP_UPD" ; |
| 1794 | case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD" ; |
| 1795 | case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD" ; |
| 1796 | case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD" ; |
| 1797 | case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD" ; |
| 1798 | case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD" ; |
| 1799 | case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD" ; |
| 1800 | case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD" ; |
| 1801 | case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD" ; |
| 1802 | case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD" ; |
| 1803 | case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD" ; |
| 1804 | case ARMISD::WLS: return "ARMISD::WLS" ; |
| 1805 | case ARMISD::LE: return "ARMISD::LE" ; |
| 1806 | case ARMISD::LOOP_DEC: return "ARMISD::LOOP_DEC" ; |
| 1807 | case ARMISD::CSINV: return "ARMISD::CSINV" ; |
| 1808 | case ARMISD::CSNEG: return "ARMISD::CSNEG" ; |
| 1809 | case ARMISD::CSINC: return "ARMISD::CSINC" ; |
| 1810 | } |
| 1811 | return nullptr; |
| 1812 | } |
| 1813 | |
| 1814 | EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, |
| 1815 | EVT VT) const { |
| 1816 | if (!VT.isVector()) |
| 1817 | return getPointerTy(DL); |
| 1818 | |
| 1819 | // MVE has a predicate register. |
| 1820 | if (Subtarget->hasMVEIntegerOps() && |
| 1821 | (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8)) |
| 1822 | return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount()); |
| 1823 | return VT.changeVectorElementTypeToInteger(); |
| 1824 | } |
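// For example, under MVE a setcc on v4i32 yields a v4i1 carried in the
// VPR predicate register, while other vector types get an integer vector
// of matching shape (e.g. a NEON v4f32 comparison yields v4i32).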
| 1825 | |
| 1826 | /// getRegClassFor - Return the register class that should be used for the |
| 1827 | /// specified value type. |
| 1828 | const TargetRegisterClass * |
| 1829 | ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { |
| 1830 | (void)isDivergent; |
| 1831 | // Map v4i64 to QQ registers but do not make the type legal. Similarly map |
| 1832 | // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to |
| 1833 | // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive |
| 1834 | // MVE Q registers. |
| 1835 | if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) { |
| 1836 | if (VT == MVT::v4i64) |
| 1837 | return &ARM::QQPRRegClass; |
| 1838 | if (VT == MVT::v8i64) |
| 1839 | return &ARM::QQQQPRRegClass; |
| 1840 | } |
| 1841 | return TargetLowering::getRegClassFor(VT); |
| 1842 | } |
| 1843 | |
// memcpy, and other memory intrinsics, typically try to use LDM/STM if the
| 1845 | // source/dest is aligned and the copy size is large enough. We therefore want |
| 1846 | // to align such objects passed to memory intrinsics. |
| 1847 | bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, |
| 1848 | unsigned &PrefAlign) const { |
| 1849 | if (!isa<MemIntrinsic>(CI)) |
| 1850 | return false; |
| 1851 | MinSize = 8; |
| 1852 | // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1 |
| 1853 | // cycle faster than 4-byte aligned LDM. |
| 1854 | PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4); |
| 1855 | return true; |
| 1856 | } |
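// For example, raising the alignment of a buffer passed to @llvm.memcpy
// to 8 bytes can allow the expanded copy to use the faster 8-byte-aligned
// LDM forms described above.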
| 1857 | |
| 1858 | // Create a fast isel object. |
| 1859 | FastISel * |
| 1860 | ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, |
| 1861 | const TargetLibraryInfo *libInfo) const { |
| 1862 | return ARM::createFastISel(funcInfo, libInfo); |
| 1863 | } |
| 1864 | |
| 1865 | Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { |
| 1866 | unsigned NumVals = N->getNumValues(); |
| 1867 | if (!NumVals) |
| 1868 | return Sched::RegPressure; |
| 1869 | |
| 1870 | for (unsigned i = 0; i != NumVals; ++i) { |
| 1871 | EVT VT = N->getValueType(i); |
| 1872 | if (VT == MVT::Glue || VT == MVT::Other) |
| 1873 | continue; |
| 1874 | if (VT.isFloatingPoint() || VT.isVector()) |
| 1875 | return Sched::ILP; |
| 1876 | } |
| 1877 | |
| 1878 | if (!N->isMachineOpcode()) |
| 1879 | return Sched::RegPressure; |
| 1880 | |
// Loads are scheduled for latency even if the instruction itinerary
// is not available.
| 1883 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 1884 | const MCInstrDesc &MCID = TII->get(N->getMachineOpcode()); |
| 1885 | |
| 1886 | if (MCID.getNumDefs() == 0) |
| 1887 | return Sched::RegPressure; |
| 1888 | if (!Itins->isEmpty() && |
| 1889 | Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2) |
| 1890 | return Sched::ILP; |
| 1891 | |
| 1892 | return Sched::RegPressure; |
| 1893 | } |
| 1894 | |
| 1895 | //===----------------------------------------------------------------------===// |
| 1896 | // Lowering Code |
| 1897 | //===----------------------------------------------------------------------===// |
| 1898 | |
| 1899 | static bool isSRL16(const SDValue &Op) { |
| 1900 | if (Op.getOpcode() != ISD::SRL) |
| 1901 | return false; |
| 1902 | if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) |
| 1903 | return Const->getZExtValue() == 16; |
| 1904 | return false; |
| 1905 | } |
| 1906 | |
| 1907 | static bool isSRA16(const SDValue &Op) { |
| 1908 | if (Op.getOpcode() != ISD::SRA) |
| 1909 | return false; |
| 1910 | if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) |
| 1911 | return Const->getZExtValue() == 16; |
| 1912 | return false; |
| 1913 | } |
| 1914 | |
| 1915 | static bool isSHL16(const SDValue &Op) { |
| 1916 | if (Op.getOpcode() != ISD::SHL) |
| 1917 | return false; |
| 1918 | if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) |
| 1919 | return Const->getZExtValue() == 16; |
| 1920 | return false; |
| 1921 | } |
| 1922 | |
// Check for a signed 16-bit value. We special case SRA because it makes it
// simpler when also looking for SRAs that aren't sign-extending a
// smaller value. Without the check, we'd need to take extra care with
// checking order for some operations.
| 1927 | static bool isS16(const SDValue &Op, SelectionDAG &DAG) { |
| 1928 | if (isSRA16(Op)) |
| 1929 | return isSHL16(Op.getOperand(0)); |
| 1930 | return DAG.ComputeNumSignBits(Op) == 17; |
| 1931 | } |
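// Note: ComputeNumSignBits(Op) == 17 on an i32 means the top 17 bits are
// all copies of the sign bit, i.e. the value fits in a signed 16-bit
// range; the isSRA16/isSHL16 pair instead matches the explicit
// sra(shl(x, 16), 16) sign-extension idiom.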
| 1932 | |
| 1933 | /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC |
| 1934 | static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { |
| 1935 | switch (CC) { |
| 1936 | default: llvm_unreachable("Unknown condition code!" ); |
| 1937 | case ISD::SETNE: return ARMCC::NE; |
| 1938 | case ISD::SETEQ: return ARMCC::EQ; |
| 1939 | case ISD::SETGT: return ARMCC::GT; |
| 1940 | case ISD::SETGE: return ARMCC::GE; |
| 1941 | case ISD::SETLT: return ARMCC::LT; |
| 1942 | case ISD::SETLE: return ARMCC::LE; |
| 1943 | case ISD::SETUGT: return ARMCC::HI; |
| 1944 | case ISD::SETUGE: return ARMCC::HS; |
| 1945 | case ISD::SETULT: return ARMCC::LO; |
| 1946 | case ISD::SETULE: return ARMCC::LS; |
| 1947 | } |
| 1948 | } |
| 1949 | |
| 1950 | /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. |
| 1951 | static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
| 1952 | ARMCC::CondCodes &CondCode2) { |
| 1953 | CondCode2 = ARMCC::AL; |
| 1954 | switch (CC) { |
| 1955 | default: llvm_unreachable("Unknown FP condition!" ); |
| 1956 | case ISD::SETEQ: |
| 1957 | case ISD::SETOEQ: CondCode = ARMCC::EQ; break; |
| 1958 | case ISD::SETGT: |
| 1959 | case ISD::SETOGT: CondCode = ARMCC::GT; break; |
| 1960 | case ISD::SETGE: |
| 1961 | case ISD::SETOGE: CondCode = ARMCC::GE; break; |
| 1962 | case ISD::SETOLT: CondCode = ARMCC::MI; break; |
| 1963 | case ISD::SETOLE: CondCode = ARMCC::LS; break; |
| 1964 | case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; |
| 1965 | case ISD::SETO: CondCode = ARMCC::VC; break; |
| 1966 | case ISD::SETUO: CondCode = ARMCC::VS; break; |
| 1967 | case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; |
| 1968 | case ISD::SETUGT: CondCode = ARMCC::HI; break; |
| 1969 | case ISD::SETUGE: CondCode = ARMCC::PL; break; |
| 1970 | case ISD::SETLT: |
| 1971 | case ISD::SETULT: CondCode = ARMCC::LT; break; |
| 1972 | case ISD::SETLE: |
| 1973 | case ISD::SETULE: CondCode = ARMCC::LE; break; |
| 1974 | case ISD::SETNE: |
| 1975 | case ISD::SETUNE: CondCode = ARMCC::NE; break; |
| 1976 | } |
| 1977 | } |
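// Where no single ARM condition expresses an FP predicate, CondCode2
// carries a second test: e.g. SETONE (ordered and not equal) holds exactly
// when OLT (MI) or OGT (GT) holds, so both conditions are checked in turn.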
| 1978 | |
| 1979 | //===----------------------------------------------------------------------===// |
| 1980 | // Calling Convention Implementation |
| 1981 | //===----------------------------------------------------------------------===// |
| 1982 | |
| 1983 | /// getEffectiveCallingConv - Get the effective calling convention, taking into |
| 1984 | /// account presence of floating point hardware and calling convention |
| 1985 | /// limitations, such as support for variadic functions. |
| 1986 | CallingConv::ID |
| 1987 | ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, |
| 1988 | bool isVarArg) const { |
| 1989 | switch (CC) { |
| 1990 | default: |
| 1991 | report_fatal_error("Unsupported calling convention" ); |
| 1992 | case CallingConv::ARM_AAPCS: |
| 1993 | case CallingConv::ARM_APCS: |
| 1994 | case CallingConv::GHC: |
| 1995 | case CallingConv::CFGuard_Check: |
| 1996 | return CC; |
| 1997 | case CallingConv::PreserveMost: |
| 1998 | return CallingConv::PreserveMost; |
| 1999 | case CallingConv::ARM_AAPCS_VFP: |
| 2000 | case CallingConv::Swift: |
| 2001 | return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP; |
| 2002 | case CallingConv::C: |
| 2003 | if (!Subtarget->isAAPCS_ABI()) |
| 2004 | return CallingConv::ARM_APCS; |
| 2005 | else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && |
| 2006 | getTargetMachine().Options.FloatABIType == FloatABI::Hard && |
| 2007 | !isVarArg) |
| 2008 | return CallingConv::ARM_AAPCS_VFP; |
| 2009 | else |
| 2010 | return CallingConv::ARM_AAPCS; |
| 2011 | case CallingConv::Fast: |
| 2012 | case CallingConv::CXX_FAST_TLS: |
| 2013 | if (!Subtarget->isAAPCS_ABI()) { |
| 2014 | if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg) |
| 2015 | return CallingConv::Fast; |
| 2016 | return CallingConv::ARM_APCS; |
| 2017 | } else if (Subtarget->hasVFP2Base() && |
| 2018 | !Subtarget->isThumb1Only() && !isVarArg) |
| 2019 | return CallingConv::ARM_AAPCS_VFP; |
| 2020 | else |
| 2021 | return CallingConv::ARM_AAPCS; |
| 2022 | } |
| 2023 | } |
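// For instance, a C function on an AAPCS target built with a hard-float
// ABI and VFP2 is given ARM_AAPCS_VFP by the rules above, while its
// variadic calls still use plain ARM_AAPCS.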
| 2024 | |
| 2025 | CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC, |
| 2026 | bool isVarArg) const { |
| 2027 | return CCAssignFnForNode(CC, false, isVarArg); |
| 2028 | } |
| 2029 | |
| 2030 | CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC, |
| 2031 | bool isVarArg) const { |
| 2032 | return CCAssignFnForNode(CC, true, isVarArg); |
| 2033 | } |
| 2034 | |
| 2035 | /// CCAssignFnForNode - Selects the correct CCAssignFn for the given |
| 2036 | /// CallingConvention. |
| 2037 | CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, |
| 2038 | bool Return, |
| 2039 | bool isVarArg) const { |
| 2040 | switch (getEffectiveCallingConv(CC, isVarArg)) { |
| 2041 | default: |
| 2042 | report_fatal_error("Unsupported calling convention" ); |
| 2043 | case CallingConv::ARM_APCS: |
| 2044 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); |
| 2045 | case CallingConv::ARM_AAPCS: |
| 2046 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
| 2047 | case CallingConv::ARM_AAPCS_VFP: |
| 2048 | return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); |
| 2049 | case CallingConv::Fast: |
| 2050 | return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); |
| 2051 | case CallingConv::GHC: |
| 2052 | return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC); |
| 2053 | case CallingConv::PreserveMost: |
| 2054 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); |
| 2055 | case CallingConv::CFGuard_Check: |
| 2056 | return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check); |
| 2057 | } |
| 2058 | } |
| 2059 | |
| 2060 | SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG, |
| 2061 | MVT LocVT, MVT ValVT, SDValue Val) const { |
| 2062 | Val = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocVT.getSizeInBits()), |
| 2063 | Val); |
| 2064 | if (Subtarget->hasFullFP16()) { |
| 2065 | Val = DAG.getNode(ARMISD::VMOVhr, dl, ValVT, Val); |
| 2066 | } else { |
| 2067 | Val = DAG.getNode(ISD::TRUNCATE, dl, |
| 2068 | MVT::getIntegerVT(ValVT.getSizeInBits()), Val); |
| 2069 | Val = DAG.getNode(ISD::BITCAST, dl, ValVT, Val); |
| 2070 | } |
| 2071 | return Val; |
| 2072 | } |
| 2073 | |
| 2074 | SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG, |
| 2075 | MVT LocVT, MVT ValVT, |
| 2076 | SDValue Val) const { |
| 2077 | if (Subtarget->hasFullFP16()) { |
| 2078 | Val = DAG.getNode(ARMISD::VMOVrh, dl, |
| 2079 | MVT::getIntegerVT(LocVT.getSizeInBits()), Val); |
| 2080 | } else { |
| 2081 | Val = DAG.getNode(ISD::BITCAST, dl, |
| 2082 | MVT::getIntegerVT(ValVT.getSizeInBits()), Val); |
| 2083 | Val = DAG.getNode(ISD::ZERO_EXTEND, dl, |
| 2084 | MVT::getIntegerVT(LocVT.getSizeInBits()), Val); |
| 2085 | } |
| 2086 | return DAG.getNode(ISD::BITCAST, dl, LocVT, Val); |
| 2087 | } |
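// Together, MoveToHPR and MoveFromHPR implement the convention that an
// f16/bf16 value travels in the low bits of a 32-bit location: with full
// FP16 a VMOVhr/VMOVrh pair is used, otherwise the value is bitcast and
// truncated/extended through the integer types.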
| 2088 | |
| 2089 | /// LowerCallResult - Lower the result values of a call into the |
| 2090 | /// appropriate copies out of appropriate physical registers. |
| 2091 | SDValue ARMTargetLowering::LowerCallResult( |
| 2092 | SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, |
| 2093 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
| 2094 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, |
| 2095 | SDValue ThisVal) const { |
| 2096 | // Assign locations to each value returned by this call. |
| 2097 | SmallVector<CCValAssign, 16> RVLocs; |
| 2098 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
| 2099 | *DAG.getContext()); |
| 2100 | CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg)); |
| 2101 | |
| 2102 | // Copy all of the result registers out of their specified physreg. |
| 2103 | for (unsigned i = 0; i != RVLocs.size(); ++i) { |
| 2104 | CCValAssign VA = RVLocs[i]; |
| 2105 | |
| 2106 | // Pass 'this' value directly from the argument to return value, to avoid |
| 2107 | // reg unit interference |
| 2108 | if (i == 0 && isThisReturn) { |
| 2109 | assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 && |
| 2110 | "unexpected return calling convention register assignment" ); |
| 2111 | InVals.push_back(ThisVal); |
| 2112 | continue; |
| 2113 | } |
| 2114 | |
| 2115 | SDValue Val; |
| 2116 | if (VA.needsCustom() && |
| 2117 | (VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2f64)) { |
| 2118 | // Handle f64 or half of a v2f64. |
| 2119 | SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, |
| 2120 | InFlag); |
| 2121 | Chain = Lo.getValue(1); |
| 2122 | InFlag = Lo.getValue(2); |
| 2123 | VA = RVLocs[++i]; // skip ahead to next loc |
| 2124 | SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, |
| 2125 | InFlag); |
| 2126 | Chain = Hi.getValue(1); |
| 2127 | InFlag = Hi.getValue(2); |
| 2128 | if (!Subtarget->isLittle()) |
| 2129 | std::swap (Lo, Hi); |
| 2130 | Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); |
| 2131 | |
| 2132 | if (VA.getLocVT() == MVT::v2f64) { |
| 2133 | SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); |
| 2134 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, |
| 2135 | DAG.getConstant(0, dl, MVT::i32)); |
| 2136 | |
| 2137 | VA = RVLocs[++i]; // skip ahead to next loc |
| 2138 | Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); |
| 2139 | Chain = Lo.getValue(1); |
| 2140 | InFlag = Lo.getValue(2); |
| 2141 | VA = RVLocs[++i]; // skip ahead to next loc |
| 2142 | Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); |
| 2143 | Chain = Hi.getValue(1); |
| 2144 | InFlag = Hi.getValue(2); |
| 2145 | if (!Subtarget->isLittle()) |
| 2146 | std::swap (Lo, Hi); |
| 2147 | Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); |
| 2148 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, |
| 2149 | DAG.getConstant(1, dl, MVT::i32)); |
| 2150 | } |
| 2151 | } else { |
| 2152 | Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), |
| 2153 | InFlag); |
| 2154 | Chain = Val.getValue(1); |
| 2155 | InFlag = Val.getValue(2); |
| 2156 | } |
| 2157 | |
| 2158 | switch (VA.getLocInfo()) { |
| 2159 | default: llvm_unreachable("Unknown loc info!" ); |
| 2160 | case CCValAssign::Full: break; |
| 2161 | case CCValAssign::BCvt: |
| 2162 | Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); |
| 2163 | break; |
| 2164 | } |
| 2165 | |
// f16 arguments have their size extended to 4 bytes and are passed as if
// they had been copied to the LSBs of a 32-bit register.
// For that, the value is passed extended to i32 (soft ABI) or to f32 (hard ABI).
| 2169 | if (VA.needsCustom() && |
| 2170 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) |
| 2171 | Val = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Val); |
| 2172 | |
| 2173 | InVals.push_back(Val); |
| 2174 | } |
| 2175 | |
| 2176 | return Chain; |
| 2177 | } |
| 2178 | |
| 2179 | /// LowerMemOpCallTo - Store the argument to the stack. |
| 2180 | SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, |
| 2181 | SDValue Arg, const SDLoc &dl, |
| 2182 | SelectionDAG &DAG, |
| 2183 | const CCValAssign &VA, |
| 2184 | ISD::ArgFlagsTy Flags) const { |
| 2185 | unsigned LocMemOffset = VA.getLocMemOffset(); |
| 2186 | SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); |
| 2187 | PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), |
| 2188 | StackPtr, PtrOff); |
| 2189 | return DAG.getStore( |
| 2190 | Chain, dl, Arg, PtrOff, |
| 2191 | MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset)); |
| 2192 | } |
| 2193 | |
| 2194 | void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, |
| 2195 | SDValue Chain, SDValue &Arg, |
| 2196 | RegsToPassVector &RegsToPass, |
| 2197 | CCValAssign &VA, CCValAssign &NextVA, |
| 2198 | SDValue &StackPtr, |
| 2199 | SmallVectorImpl<SDValue> &MemOpChains, |
| 2200 | ISD::ArgFlagsTy Flags) const { |
| 2201 | SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 2202 | DAG.getVTList(MVT::i32, MVT::i32), Arg); |
| 2203 | unsigned id = Subtarget->isLittle() ? 0 : 1; |
| 2204 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id))); |
| 2205 | |
| 2206 | if (NextVA.isRegLoc()) |
| 2207 | RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id))); |
| 2208 | else { |
| 2209 | assert(NextVA.isMemLoc()); |
| 2210 | if (!StackPtr.getNode()) |
| 2211 | StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, |
| 2212 | getPointerTy(DAG.getDataLayout())); |
| 2213 | |
| 2214 | MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id), |
| 2215 | dl, DAG, NextVA, |
| 2216 | Flags)); |
| 2217 | } |
| 2218 | } |
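// Illustratively, for a soft-float f64 argument assigned to r0/r1 the
// VMOVRRD above becomes "vmov r0, r1, d0"; if only one GPR remains, the
// second half is instead stored to the stack slot described by NextVA.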
| 2219 | |
/// LowerCall - Lower a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
| 2223 | SDValue |
| 2224 | ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, |
| 2225 | SmallVectorImpl<SDValue> &InVals) const { |
| 2226 | SelectionDAG &DAG = CLI.DAG; |
| 2227 | SDLoc &dl = CLI.DL; |
| 2228 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; |
| 2229 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; |
| 2230 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; |
| 2231 | SDValue Chain = CLI.Chain; |
| 2232 | SDValue Callee = CLI.Callee; |
| 2233 | bool &isTailCall = CLI.IsTailCall; |
| 2234 | CallingConv::ID CallConv = CLI.CallConv; |
| 2235 | bool doesNotRet = CLI.DoesNotReturn; |
| 2236 | bool isVarArg = CLI.IsVarArg; |
| 2237 | |
| 2238 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2239 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 2240 | MachineFunction::CallSiteInfo CSInfo; |
| 2241 | bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); |
| 2242 | bool isThisReturn = false; |
| 2243 | bool isCmseNSCall = false; |
| 2244 | bool PreferIndirect = false; |
| 2245 | |
| 2246 | // Determine whether this is a non-secure function call. |
| 2247 | if (CLI.CB && CLI.CB->getAttributes().hasFnAttribute("cmse_nonsecure_call" )) |
| 2248 | isCmseNSCall = true; |
| 2249 | |
| 2250 | // Disable tail calls if they're not supported. |
| 2251 | if (!Subtarget->supportsTailCall()) |
| 2252 | isTailCall = false; |
| 2253 | |
// For both the non-secure calls and the returns from a CMSE entry function,
// the function needs to do some extra work after the call, or before the
// return, respectively; thus it cannot end with a tail call
| 2257 | if (isCmseNSCall || AFI->isCmseNSEntryFunction()) |
| 2258 | isTailCall = false; |
| 2259 | |
| 2260 | if (isa<GlobalAddressSDNode>(Callee)) { |
| 2261 | // If we're optimizing for minimum size and the function is called three or |
| 2262 | // more times in this block, we can improve codesize by calling indirectly |
| 2263 | // as BLXr has a 16-bit encoding. |
| 2264 | auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); |
| 2265 | if (CLI.CB) { |
| 2266 | auto *BB = CLI.CB->getParent(); |
| 2267 | PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() && |
| 2268 | count_if(GV->users(), [&BB](const User *U) { |
| 2269 | return isa<Instruction>(U) && |
| 2270 | cast<Instruction>(U)->getParent() == BB; |
| 2271 | }) > 2; |
| 2272 | } |
| 2273 | } |
| 2274 | if (isTailCall) { |
| 2275 | // Check if it's really possible to do a tail call. |
| 2276 | isTailCall = IsEligibleForTailCallOptimization( |
| 2277 | Callee, CallConv, isVarArg, isStructRet, |
| 2278 | MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG, |
| 2279 | PreferIndirect); |
| 2280 | if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall()) |
| 2281 | report_fatal_error("failed to perform tail call elimination on a call " |
| 2282 | "site marked musttail" ); |
| 2283 | // We don't support GuaranteedTailCallOpt for ARM, only automatically |
| 2284 | // detected sibcalls. |
| 2285 | if (isTailCall) |
| 2286 | ++NumTailCalls; |
| 2287 | } |
| 2288 | |
| 2289 | // Analyze operands of the call, assigning locations to each operand. |
| 2290 | SmallVector<CCValAssign, 16> ArgLocs; |
| 2291 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
| 2292 | *DAG.getContext()); |
| 2293 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg)); |
| 2294 | |
| 2295 | // Get a count of how many bytes are to be pushed on the stack. |
| 2296 | unsigned NumBytes = CCInfo.getNextStackOffset(); |
| 2297 | |
| 2298 | if (isTailCall) { |
| 2299 | // For tail calls, memory operands are available in our caller's stack. |
| 2300 | NumBytes = 0; |
| 2301 | } else { |
| 2302 | // Adjust the stack pointer for the new arguments... |
| 2303 | // These operations are automatically eliminated by the prolog/epilog pass |
| 2304 | Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); |
| 2305 | } |
| 2306 | |
| 2307 | SDValue StackPtr = |
| 2308 | DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout())); |
| 2309 | |
| 2310 | RegsToPassVector RegsToPass; |
| 2311 | SmallVector<SDValue, 8> MemOpChains; |
| 2312 | |
| 2313 | // Walk the register/memloc assignments, inserting copies/loads. In the case |
| 2314 | // of tail call optimization, arguments are handled later. |
| 2315 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); |
| 2316 | i != e; |
| 2317 | ++i, ++realArgIdx) { |
| 2318 | CCValAssign &VA = ArgLocs[i]; |
| 2319 | SDValue Arg = OutVals[realArgIdx]; |
| 2320 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; |
| 2321 | bool isByVal = Flags.isByVal(); |
| 2322 | |
| 2323 | // Promote the value if needed. |
| 2324 | switch (VA.getLocInfo()) { |
| 2325 | default: llvm_unreachable("Unknown loc info!"); |
| 2326 | case CCValAssign::Full: break; |
| 2327 | case CCValAssign::SExt: |
| 2328 | Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); |
| 2329 | break; |
| 2330 | case CCValAssign::ZExt: |
| 2331 | Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); |
| 2332 | break; |
| 2333 | case CCValAssign::AExt: |
| 2334 | Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); |
| 2335 | break; |
| 2336 | case CCValAssign::BCvt: |
| 2337 | Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); |
| 2338 | break; |
| 2339 | } |
| 2340 | |
| 2341 | // f16 arguments have their size extended to 4 bytes and are passed as if |
| 2342 | // they had been copied to the LSBs of a 32-bit register. |
| 2343 | // To that end, they are passed extended to i32 (soft ABI) or to f32 (hard ABI). |
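| | // For example, an f16 value of 1.0 (bit pattern 0x3C00) would travel as |
| | // the i32 0x00003C00 under the soft ABI, or in the low 16 bits of an f32 |
| | // register under the hard ABI. |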
| 2344 | if (VA.needsCustom() && |
| 2345 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) { |
| 2346 | Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg); |
| 2347 | } else { |
| 2348 | // f16 arguments could have been extended prior to argument lowering. |
| 2349 | // Mask such arguments if this is a CMSE nonsecure call. |
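| | // For a 32-bit location this builds the mask 0x0000FFFF, clearing the top |
| | // 16 bits so that no stale register contents can leak to the non-secure |
| | // side. |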
| 2350 | auto ArgVT = Outs[realArgIdx].ArgVT; |
| 2351 | if (isCmseNSCall && (ArgVT == MVT::f16)) { |
| 2352 | auto LocBits = VA.getLocVT().getSizeInBits(); |
| 2353 | auto MaskValue = APInt::getLowBitsSet(LocBits, ArgVT.getSizeInBits()); |
| 2354 | SDValue Mask = |
| 2355 | DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits)); |
| 2356 | Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg); |
| 2357 | Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask); |
| 2358 | Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); |
| 2359 | } |
| 2360 | } |
| 2361 | |
| 2362 | // f64 and v2f64 might be passed in i32 pairs and must be split into pieces |
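| | // For example, with a soft-float calling convention an f64 may travel as |
| | // a GPR pair such as r2+r3, or be split so that one word is in r3 and the |
| | // other word is on the stack. |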
| 2363 | if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { |
| 2364 | SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, |
| 2365 | DAG.getConstant(0, dl, MVT::i32)); |
| 2366 | SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, |
| 2367 | DAG.getConstant(1, dl, MVT::i32)); |
| 2368 | |
| 2369 | PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, VA, ArgLocs[++i], |
| 2370 | StackPtr, MemOpChains, Flags); |
| 2371 | |
| 2372 | VA = ArgLocs[++i]; // skip ahead to next loc |
| 2373 | if (VA.isRegLoc()) { |
| 2374 | PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, VA, ArgLocs[++i], |
| 2375 | StackPtr, MemOpChains, Flags); |
| 2376 | } else { |
| 2377 | assert(VA.isMemLoc()); |
| 2378 | |
| 2379 | MemOpChains.push_back( |
| 2380 | LowerMemOpCallTo(Chain, StackPtr, Op1, dl, DAG, VA, Flags)); |
| 2381 | } |
| 2382 | } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { |
| 2383 | PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], |
| 2384 | StackPtr, MemOpChains, Flags); |
| 2385 | } else if (VA.isRegLoc()) { |
| 2386 | if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() && |
| 2387 | Outs[0].VT == MVT::i32) { |
| 2388 | assert(VA.getLocVT() == MVT::i32 && |
| 2389 | "unexpected calling convention register assignment" ); |
| 2390 | assert(!Ins.empty() && Ins[0].VT == MVT::i32 && |
| 2391 | "unexpected use of 'returned'" ); |
| 2392 | isThisReturn = true; |
| 2393 | } |
| 2394 | const TargetOptions &Options = DAG.getTarget().Options; |
| 2395 | if (Options.EmitCallSiteInfo) |
| 2396 | CSInfo.emplace_back(VA.getLocReg(), i); |
| 2397 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); |
| 2398 | } else if (isByVal) { |
| 2399 | assert(VA.isMemLoc()); |
| 2400 | unsigned offset = 0; |
| 2401 | |
| 2402 | // True if this byval aggregate will be split between registers |
| 2403 | // and memory. |
| 2404 | unsigned ByValArgsCount = CCInfo.getInRegsParamsCount(); |
| 2405 | unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed(); |
| 2406 | |
| 2407 | if (CurByValIdx < ByValArgsCount) { |
| 2408 | |
| 2409 | unsigned RegBegin, RegEnd; |
| 2410 | CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd); |
| 2411 | |
| 2412 | EVT PtrVT = |
| 2413 | DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); |
| 2414 | unsigned int i, j; |
| 2415 | for (i = 0, j = RegBegin; j < RegEnd; i++, j++) { |
| 2416 | SDValue Const = DAG.getConstant(4*i, dl, MVT::i32); |
| 2417 | SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); |
| 2418 | SDValue Load = |
| 2419 | DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo(), |
| 2420 | DAG.InferPtrAlign(AddArg)); |
| 2421 | MemOpChains.push_back(Load.getValue(1)); |
| 2422 | RegsToPass.push_back(std::make_pair(j, Load)); |
| 2423 | } |
| 2424 | |
| 2425 | // If the parameter size exceeds the register area, the "offset" value |
| 2426 | // helps us calculate the stack slot for the remaining part properly. |
| 2427 | offset = RegEnd - RegBegin; |
| 2428 | |
| 2429 | CCInfo.nextInRegsParam(); |
| 2430 | } |
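| | // For example, a 12-byte byval assigned the registers r2..r3 loads two |
| | // words into r2 and r3 (offset = 2), leaving the remaining 4 bytes to be |
| | // copied to the stack by the COPY_STRUCT_BYVAL node below. |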
| 2431 | |
| 2432 | if (Flags.getByValSize() > 4*offset) { |
| 2433 | auto PtrVT = getPointerTy(DAG.getDataLayout()); |
| 2434 | unsigned LocMemOffset = VA.getLocMemOffset(); |
| 2435 | SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); |
| 2436 | SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff); |
| 2437 | SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl); |
| 2438 | SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset); |
| 2439 | SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl, |
| 2440 | MVT::i32); |
| 2441 | SDValue AlignNode = |
| 2442 | DAG.getConstant(Flags.getNonZeroByValAlign().value(), dl, MVT::i32); |
| 2443 | |
| 2444 | SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); |
| 2445 | SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode}; |
| 2446 | MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs, |
| 2447 | Ops)); |
| 2448 | } |
| 2449 | } else if (!isTailCall) { |
| 2450 | assert(VA.isMemLoc()); |
| 2451 | |
| 2452 | MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, |
| 2453 | dl, DAG, VA, Flags)); |
| 2454 | } |
| 2455 | } |
| 2456 | |
| 2457 | if (!MemOpChains.empty()) |
| 2458 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); |
| 2459 | |
| 2460 | // Build a sequence of copy-to-reg nodes chained together with token chain |
| 2461 | // and flag operands which copy the outgoing args into the appropriate regs. |
| 2462 | SDValue InFlag; |
| 2463 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { |
| 2464 | Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, |
| 2465 | RegsToPass[i].second, InFlag); |
| 2466 | InFlag = Chain.getValue(1); |
| 2467 | } |
| 2468 | |
| 2469 | // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every |
| 2470 | // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol |
| 2471 | // node so that legalize doesn't hack it. |
| 2472 | bool isDirect = false; |
| 2473 | |
| 2474 | const TargetMachine &TM = getTargetMachine(); |
| 2475 | const Module *Mod = MF.getFunction().getParent(); |
| 2476 | const GlobalValue *GV = nullptr; |
| 2477 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) |
| 2478 | GV = G->getGlobal(); |
| 2479 | bool isStub = |
| 2480 | !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO(); |
| 2481 | |
| 2482 | bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass()); |
| 2483 | bool isLocalARMFunc = false; |
| 2484 | auto PtrVt = getPointerTy(DAG.getDataLayout()); |
| 2485 | |
| 2486 | if (Subtarget->genLongCalls()) { |
| 2487 | assert((!isPositionIndependent() || Subtarget->isTargetWindows()) && |
| 2488 | "long-calls codegen is not position independent!" ); |
| 2489 | // Handle a global address or an external symbol. If it's not one of |
| 2490 | // those, the target's already in a register, so we don't need to do |
| 2491 | // anything extra. |
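| | // The sequence built here is roughly: |
| | //   ldr rX, .LCPIn_m   @ load the callee address from the constant pool |
| | //   blx rX |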
| 2492 | if (isa<GlobalAddressSDNode>(Callee)) { |
| 2493 | // Create a constant pool entry for the callee address |
| 2494 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2495 | ARMConstantPoolValue *CPV = |
| 2496 | ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); |
| 2497 | |
| 2498 | // Get the address of the callee into a register |
| 2499 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4)); |
| 2500 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 2501 | Callee = DAG.getLoad( |
| 2502 | PtrVt, dl, DAG.getEntryNode(), CPAddr, |
| 2503 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 2504 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { |
| 2505 | const char *Sym = S->getSymbol(); |
| 2506 | |
| 2507 | // Create a constant pool entry for the callee address |
| 2508 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2509 | ARMConstantPoolValue *CPV = |
| 2510 | ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, |
| 2511 | ARMPCLabelIndex, 0); |
| 2512 | // Get the address of the callee into a register |
| 2513 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4)); |
| 2514 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 2515 | Callee = DAG.getLoad( |
| 2516 | PtrVt, dl, DAG.getEntryNode(), CPAddr, |
| 2517 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 2518 | } |
| 2519 | } else if (isa<GlobalAddressSDNode>(Callee)) { |
| 2520 | if (!PreferIndirect) { |
| 2521 | isDirect = true; |
| 2522 | bool isDef = GV->isStrongDefinitionForLinker(); |
| 2523 | |
| 2524 | // ARM call to a local ARM function is predicable. |
| 2525 | isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking); |
| 2526 | // tBX takes a register source operand. |
| 2527 | if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
| 2528 | assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?"); |
| 2529 | Callee = DAG.getNode( |
| 2530 | ARMISD::WrapperPIC, dl, PtrVt, |
| 2531 | DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY)); |
| 2532 | Callee = DAG.getLoad( |
| 2533 | PtrVt, dl, DAG.getEntryNode(), Callee, |
| 2534 | MachinePointerInfo::getGOT(DAG.getMachineFunction()), MaybeAlign(), |
| 2535 | MachineMemOperand::MODereferenceable | |
| 2536 | MachineMemOperand::MOInvariant); |
| 2537 | } else if (Subtarget->isTargetCOFF()) { |
| 2538 | assert(Subtarget->isTargetWindows() && |
| 2539 | "Windows is the only supported COFF target" ); |
| 2540 | unsigned TargetFlags = ARMII::MO_NO_FLAG; |
| 2541 | if (GV->hasDLLImportStorageClass()) |
| 2542 | TargetFlags = ARMII::MO_DLLIMPORT; |
| 2543 | else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV)) |
| 2544 | TargetFlags = ARMII::MO_COFFSTUB; |
| 2545 | Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*offset=*/0, |
| 2546 | TargetFlags); |
| 2547 | if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) |
| 2548 | Callee = |
| 2549 | DAG.getLoad(PtrVt, dl, DAG.getEntryNode(), |
| 2550 | DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee), |
| 2551 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
| 2552 | } else { |
| 2553 | Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0); |
| 2554 | } |
| 2555 | } |
| 2556 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { |
| 2557 | isDirect = true; |
| 2558 | // tBX takes a register source operand. |
| 2559 | const char *Sym = S->getSymbol(); |
| 2560 | if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { |
| 2561 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 2562 | ARMConstantPoolValue *CPV = |
| 2563 | ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, |
| 2564 | ARMPCLabelIndex, 4); |
| 2565 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4)); |
| 2566 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 2567 | Callee = DAG.getLoad( |
| 2568 | PtrVt, dl, DAG.getEntryNode(), CPAddr, |
| 2569 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 2570 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); |
| 2571 | Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel); |
| 2572 | } else { |
| 2573 | Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0); |
| 2574 | } |
| 2575 | } |
| 2576 | |
| 2577 | if (isCmseNSCall) { |
| 2578 | assert(!isARMFunc && !isDirect && |
| 2579 | "Cannot handle call to ARM function or direct call" ); |
| 2580 | if (NumBytes > 0) { |
| 2581 | DiagnosticInfoUnsupported Diag(DAG.getMachineFunction().getFunction(), |
| 2582 | "call to non-secure function would " |
| 2583 | "require passing arguments on stack" , |
| 2584 | dl.getDebugLoc()); |
| 2585 | DAG.getContext()->diagnose(Diag); |
| 2586 | } |
| 2587 | if (isStructRet) { |
| 2588 | DiagnosticInfoUnsupported Diag( |
| 2589 | DAG.getMachineFunction().getFunction(), |
| 2590 | "call to non-secure function would return value through pointer" , |
| 2591 | dl.getDebugLoc()); |
| 2592 | DAG.getContext()->diagnose(Diag); |
| 2593 | } |
| 2594 | } |
| 2595 | |
| 2596 | // FIXME: handle tail calls differently. |
| 2597 | unsigned CallOpc; |
| 2598 | if (Subtarget->isThumb()) { |
| 2599 | if (isCmseNSCall) |
| 2600 | CallOpc = ARMISD::tSECALL; |
| 2601 | else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) |
| 2602 | CallOpc = ARMISD::CALL_NOLINK; |
| 2603 | else |
| 2604 | CallOpc = ARMISD::CALL; |
| 2605 | } else { |
| 2606 | if (!isDirect && !Subtarget->hasV5TOps()) |
| 2607 | CallOpc = ARMISD::CALL_NOLINK; |
| 2608 | else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() && |
| 2609 | // Emit regular call when code size is the priority |
| 2610 | !Subtarget->hasMinSize()) |
| 2611 | // "mov lr, pc; b _foo" to avoid confusing the RSP |
| 2612 | CallOpc = ARMISD::CALL_NOLINK; |
| 2613 | else |
| 2614 | CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; |
| 2615 | } |
| 2616 | |
| 2617 | std::vector<SDValue> Ops; |
| 2618 | Ops.push_back(Chain); |
| 2619 | Ops.push_back(Callee); |
| 2620 | |
| 2621 | // Add argument registers to the end of the list so that they are known live |
| 2622 | // into the call. |
| 2623 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) |
| 2624 | Ops.push_back(DAG.getRegister(RegsToPass[i].first, |
| 2625 | RegsToPass[i].second.getValueType())); |
| 2626 | |
| 2627 | // Add a register mask operand representing the call-preserved registers. |
| 2628 | if (!isTailCall) { |
| 2629 | const uint32_t *Mask; |
| 2630 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); |
| 2631 | if (isThisReturn) { |
| 2632 | // For 'this' returns, use the R0-preserving mask if applicable |
| 2633 | Mask = ARI->getThisReturnPreservedMask(MF, CallConv); |
| 2634 | if (!Mask) { |
| 2635 | // Set isThisReturn to false if the calling convention is not one that |
| 2636 | // allows 'returned' to be modeled in this way, so LowerCallResult does |
| 2637 | // not try to pass 'this' straight through |
| 2638 | isThisReturn = false; |
| 2639 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
| 2640 | } |
| 2641 | } else |
| 2642 | Mask = ARI->getCallPreservedMask(MF, CallConv); |
| 2643 | |
| 2644 | assert(Mask && "Missing call preserved mask for calling convention"); |
| 2645 | Ops.push_back(DAG.getRegisterMask(Mask)); |
| 2646 | } |
| 2647 | |
| 2648 | if (InFlag.getNode()) |
| 2649 | Ops.push_back(InFlag); |
| 2650 | |
| 2651 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
| 2652 | if (isTailCall) { |
| 2653 | MF.getFrameInfo().setHasTailCall(); |
| 2654 | SDValue Ret = DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops); |
| 2655 | DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo)); |
| 2656 | return Ret; |
| 2657 | } |
| 2658 | |
| 2659 | // Returns a chain and a flag for retval copy to use. |
| 2660 | Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); |
| 2661 | DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); |
| 2662 | InFlag = Chain.getValue(1); |
| 2663 | DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo)); |
| 2664 | |
| 2665 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), |
| 2666 | DAG.getIntPtrConstant(0, dl, true), InFlag, dl); |
| 2667 | if (!Ins.empty()) |
| 2668 | InFlag = Chain.getValue(1); |
| 2669 | |
| 2670 | // Handle result values, copying them out of physregs into vregs that we |
| 2671 | // return. |
| 2672 | return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG, |
| 2673 | InVals, isThisReturn, |
| 2674 | isThisReturn ? OutVals[0] : SDValue()); |
| 2675 | } |
| 2676 | |
| 2677 | /// HandleByVal - Every parameter *after* a byval parameter is passed |
| 2678 | /// on the stack. Remember the next parameter register to allocate, |
| 2679 | /// and then confiscate the rest of the parameter registers to ensure |
| 2680 | /// this. |
| 2681 | void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size, |
| 2682 | Align Alignment) const { |
| 2683 | // Byval (as with any stack) slots are always at least 4 byte aligned. |
| 2684 | Alignment = std::max(Alignment, Align(4)); |
| 2685 | |
| 2686 | unsigned Reg = State->AllocateReg(GPRArgRegs); |
| 2687 | if (!Reg) |
| 2688 | return; |
| 2689 | |
| 2690 | unsigned AlignInRegs = Alignment.value() / 4; |
| 2691 | unsigned Waste = (ARM::R4 - Reg) % AlignInRegs; |
| 2692 | for (unsigned i = 0; i < Waste; ++i) |
| 2693 | Reg = State->AllocateReg(GPRArgRegs); |
| 2694 | |
| 2695 | if (!Reg) |
| 2696 | return; |
| 2697 | |
| 2698 | unsigned Excess = 4 * (ARM::R4 - Reg); |
| 2699 | |
| 2700 | // Special case when NSAA != SP and the parameter size is greater than the |
| 2701 | // size of all remaining GPR regs. In that case we can't split the parameter; |
| 2702 | // we must send it all to the stack. We must also set the NCRN to R4, thereby |
| 2703 | // wasting all remaining registers. |
| 2704 | const unsigned NSAAOffset = State->getNextStackOffset(); |
| 2705 | if (NSAAOffset != 0 && Size > Excess) { |
| 2706 | while (State->AllocateReg(GPRArgRegs)) |
| 2707 | ; |
| 2708 | return; |
| 2709 | } |
| 2710 | |
| 2711 | // The first register for the byval parameter is the first register that |
| 2712 | // wasn't allocated before this method call, so it is "Reg". |
| 2713 | // If the parameter is small enough to be saved in the range [Reg, R4), |
| 2714 | // then the end (first-past-last) register is Reg + param-size-in-regs; |
| 2715 | // otherwise the parameter is split between registers and the stack, and |
| 2716 | // the end register is R4 in that case. |
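| | // For example, a 10-byte byval whose first free register is R2: Excess is |
| | // 8 bytes (R2 and R3), ByValRegEnd is R4, and Size is reduced to the 2 |
| | // bytes that still go to the stack. |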
| 2717 | unsigned ByValRegBegin = Reg; |
| 2718 | unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4); |
| 2719 | State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd); |
| 2720 | // Note that the first register was already allocated at the beginning of |
| 2721 | // this function; allocate the remaining registers we need here. |
| 2722 | for (unsigned i = Reg + 1; i != ByValRegEnd; ++i) |
| 2723 | State->AllocateReg(GPRArgRegs); |
| 2724 | // A byval parameter that is split between registers and memory needs its |
| 2725 | // size truncated here. |
| 2726 | // In the case where the entire structure fits in registers, we set the |
| 2727 | // size in memory to zero. |
| 2728 | Size = std::max<int>(Size - Excess, 0); |
| 2729 | } |
| 2730 | |
| 2731 | /// MatchingStackOffset - Return true if the given stack call argument is |
| 2732 | /// already available in the same position (relatively) of the caller's |
| 2733 | /// incoming argument stack. |
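| | /// For example, when "int f(int a, int b, int c, int d, int e)" ends in a |
| | /// sibcall to "g" with the same five arguments, the fifth argument already |
| | /// occupies f's incoming fixed stack slot, so no store is needed. |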
| 2734 | static |
| 2735 | bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, |
| 2736 | MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, |
| 2737 | const TargetInstrInfo *TII) { |
| 2738 | unsigned Bytes = Arg.getValueSizeInBits() / 8; |
| 2739 | int FI = std::numeric_limits<int>::max(); |
| 2740 | if (Arg.getOpcode() == ISD::CopyFromReg) { |
| 2741 | unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); |
| 2742 | if (!Register::isVirtualRegister(VR)) |
| 2743 | return false; |
| 2744 | MachineInstr *Def = MRI->getVRegDef(VR); |
| 2745 | if (!Def) |
| 2746 | return false; |
| 2747 | if (!Flags.isByVal()) { |
| 2748 | if (!TII->isLoadFromStackSlot(*Def, FI)) |
| 2749 | return false; |
| 2750 | } else { |
| 2751 | return false; |
| 2752 | } |
| 2753 | } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { |
| 2754 | if (Flags.isByVal()) |
| 2755 | // ByVal argument is passed in as a pointer but it's now being |
| 2756 | // dereferenced. e.g. |
| 2757 | // define @foo(%struct.X* %A) { |
| 2758 | // tail call @bar(%struct.X* byval %A) |
| 2759 | // } |
| 2760 | return false; |
| 2761 | SDValue Ptr = Ld->getBasePtr(); |
| 2762 | FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); |
| 2763 | if (!FINode) |
| 2764 | return false; |
| 2765 | FI = FINode->getIndex(); |
| 2766 | } else |
| 2767 | return false; |
| 2768 | |
| 2769 | assert(FI != std::numeric_limits<int>::max()); |
| 2770 | if (!MFI.isFixedObjectIndex(FI)) |
| 2771 | return false; |
| 2772 | return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI); |
| 2773 | } |
| 2774 | |
| 2775 | /// IsEligibleForTailCallOptimization - Check whether the call is eligible |
| 2776 | /// for tail call optimization. Targets which want to do tail call |
| 2777 | /// optimization should implement this function. |
| 2778 | bool ARMTargetLowering::IsEligibleForTailCallOptimization( |
| 2779 | SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, |
| 2780 | bool isCalleeStructRet, bool isCallerStructRet, |
| 2781 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 2782 | const SmallVectorImpl<SDValue> &OutVals, |
| 2783 | const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG, |
| 2784 | const bool isIndirect) const { |
| 2785 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2786 | const Function &CallerF = MF.getFunction(); |
| 2787 | CallingConv::ID CallerCC = CallerF.getCallingConv(); |
| 2788 | |
| 2789 | assert(Subtarget->supportsTailCall()); |
| 2790 | |
| 2791 | // Indirect tail calls cannot be optimized for Thumb1 if the args |
| 2792 | // to the call take up r0-r3. The reason is that there are no legal registers |
| 2793 | // left to hold the pointer to the function to be called. |
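| | // For example, an indirect call with four i32 arguments occupies r0-r3, |
| | // leaving no free argument register to hold the function pointer. |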
| 2794 | if (Subtarget->isThumb1Only() && Outs.size() >= 4 && |
| 2795 | (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect)) |
| 2796 | return false; |
| 2797 | |
| 2798 | // Look for obvious safe cases to perform tail call optimization that do not |
| 2799 | // require ABI changes. This is what gcc calls sibcall. |
| 2800 | |
| 2801 | // Exception-handling functions need a special set of instructions to indicate |
| 2802 | // a return to the hardware. Tail-calling another function would probably |
| 2803 | // break this. |
| 2804 | if (CallerF.hasFnAttribute("interrupt")) |
| 2805 | return false; |
| 2806 | |
| 2807 | // Also avoid sibcall optimization if either caller or callee uses struct |
| 2808 | // return semantics. |
| 2809 | if (isCalleeStructRet || isCallerStructRet) |
| 2810 | return false; |
| 2811 | |
| 2812 | // Externally-defined functions with weak linkage should not be |
| 2813 | // tail-called on ARM when the OS does not support dynamic |
| 2814 | // pre-emption of symbols, as the AAELF spec requires normal calls |
| 2815 | // to undefined weak functions to be replaced with a NOP or jump to the |
| 2816 | // next instruction. The behaviour of branch instructions in this |
| 2817 | // situation (as used for tail calls) is implementation-defined, so we |
| 2818 | // cannot rely on the linker replacing the tail call with a return. |
| 2819 | if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { |
| 2820 | const GlobalValue *GV = G->getGlobal(); |
| 2821 | const Triple &TT = getTargetMachine().getTargetTriple(); |
| 2822 | if (GV->hasExternalWeakLinkage() && |
| 2823 | (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO())) |
| 2824 | return false; |
| 2825 | } |
| 2826 | |
| 2827 | // Check that the call results are passed in the same way. |
| 2828 | LLVMContext &C = *DAG.getContext(); |
| 2829 | if (!CCState::resultsCompatible( |
| 2830 | getEffectiveCallingConv(CalleeCC, isVarArg), |
| 2831 | getEffectiveCallingConv(CallerCC, CallerF.isVarArg()), MF, C, Ins, |
| 2832 | CCAssignFnForReturn(CalleeCC, isVarArg), |
| 2833 | CCAssignFnForReturn(CallerCC, CallerF.isVarArg()))) |
| 2834 | return false; |
| 2835 | // The callee has to preserve all registers the caller needs to preserve. |
| 2836 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 2837 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); |
| 2838 | if (CalleeCC != CallerCC) { |
| 2839 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); |
| 2840 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) |
| 2841 | return false; |
| 2842 | } |
| 2843 | |
| 2844 | // If the caller's vararg or byval argument has been split between registers |
| 2845 | // and stack, do not perform a tail call, since part of the argument is in |
| 2846 | // the caller's local frame. |
| 2847 | const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>(); |
| 2848 | if (AFI_Caller->getArgRegsSaveSize()) |
| 2849 | return false; |
| 2850 | |
| 2851 | // If the callee takes no arguments then go on to check the results of the |
| 2852 | // call. |
| 2853 | if (!Outs.empty()) { |
| 2854 | // Check if stack adjustment is needed. For now, do not do this if any |
| 2855 | // argument is passed on the stack. |
| 2856 | SmallVector<CCValAssign, 16> ArgLocs; |
| 2857 | CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C); |
| 2858 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg)); |
| 2859 | if (CCInfo.getNextStackOffset()) { |
| 2860 | // Check if the arguments are already laid out in the right way as |
| 2861 | // the caller's fixed stack objects. |
| 2862 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 2863 | const MachineRegisterInfo *MRI = &MF.getRegInfo(); |
| 2864 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 2865 | for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); |
| 2866 | i != e; |
| 2867 | ++i, ++realArgIdx) { |
| 2868 | CCValAssign &VA = ArgLocs[i]; |
| 2869 | EVT RegVT = VA.getLocVT(); |
| 2870 | SDValue Arg = OutVals[realArgIdx]; |
| 2871 | ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; |
| 2872 | if (VA.getLocInfo() == CCValAssign::Indirect) |
| 2873 | return false; |
| 2874 | if (VA.needsCustom() && (RegVT == MVT::f64 || RegVT == MVT::v2f64)) { |
| 2875 | // f64 and vector types are split into multiple registers or |
| 2876 | // register/stack-slot combinations. The types will not match |
| 2877 | // the registers; give up on memory f64 refs until we figure |
| 2878 | // out what to do about this. |
| 2879 | if (!VA.isRegLoc()) |
| 2880 | return false; |
| 2881 | if (!ArgLocs[++i].isRegLoc()) |
| 2882 | return false; |
| 2883 | if (RegVT == MVT::v2f64) { |
| 2884 | if (!ArgLocs[++i].isRegLoc()) |
| 2885 | return false; |
| 2886 | if (!ArgLocs[++i].isRegLoc()) |
| 2887 | return false; |
| 2888 | } |
| 2889 | } else if (!VA.isRegLoc()) { |
| 2890 | if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, |
| 2891 | MFI, MRI, TII)) |
| 2892 | return false; |
| 2893 | } |
| 2894 | } |
| 2895 | } |
| 2896 | |
| 2897 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
| 2898 | if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) |
| 2899 | return false; |
| 2900 | } |
| 2901 | |
| 2902 | return true; |
| 2903 | } |
| 2904 | |
| 2905 | bool |
| 2906 | ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, |
| 2907 | MachineFunction &MF, bool isVarArg, |
| 2908 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 2909 | LLVMContext &Context) const { |
| 2910 | SmallVector<CCValAssign, 16> RVLocs; |
| 2911 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); |
| 2912 | return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); |
| 2913 | } |
| 2914 | |
| 2915 | static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, |
| 2916 | const SDLoc &DL, SelectionDAG &DAG) { |
| 2917 | const MachineFunction &MF = DAG.getMachineFunction(); |
| 2918 | const Function &F = MF.getFunction(); |
| 2919 | |
| 2920 | StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString(); |
| 2921 | |
| 2922 | // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset |
| 2923 | // version of the "preferred return address". These offsets affect the return |
| 2924 | // instruction if this is a return from PL1 without hypervisor extensions. |
| 2925 | // IRQ/FIQ: +4 "subs pc, lr, #4" |
| 2926 | // SWI: 0 "subs pc, lr, #0" |
| 2927 | // ABORT: +4 "subs pc, lr, #4" |
| 2928 | // UNDEF: +4/+2 "subs pc, lr, #0" |
| 2929 | // UNDEF varies depending on whether the exception came from ARM or Thumb |
| 2930 | // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0. |
| 2931 | |
| 2932 | int64_t LROffset; |
| 2933 | if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" || |
| 2934 | IntKind == "ABORT" ) |
| 2935 | LROffset = 4; |
| 2936 | else if (IntKind == "SWI" || IntKind == "UNDEF") |
| 2937 | LROffset = 0; |
| 2938 | else |
| 2939 | report_fatal_error("Unsupported interrupt attribute. If present, value " |
| 2940 | "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF" ); |
| 2941 | |
| 2942 | RetOps.insert(RetOps.begin() + 1, |
| 2943 | DAG.getConstant(LROffset, DL, MVT::i32, false)); |
| 2944 | |
| 2945 | return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps); |
| 2946 | } |
| 2947 | |
| 2948 | SDValue |
| 2949 | ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, |
| 2950 | bool isVarArg, |
| 2951 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
| 2952 | const SmallVectorImpl<SDValue> &OutVals, |
| 2953 | const SDLoc &dl, SelectionDAG &DAG) const { |
| 2954 | // CCValAssign - represent the assignment of the return value to a location. |
| 2955 | SmallVector<CCValAssign, 16> RVLocs; |
| 2956 | |
| 2957 | // CCState - Info about the registers and stack slots. |
| 2958 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
| 2959 | *DAG.getContext()); |
| 2960 | |
| 2961 | // Analyze outgoing return values. |
| 2962 | CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); |
| 2963 | |
| 2964 | SDValue Flag; |
| 2965 | SmallVector<SDValue, 4> RetOps; |
| 2966 | RetOps.push_back(Chain); // Operand #0 = Chain (updated below) |
| 2967 | bool isLittleEndian = Subtarget->isLittle(); |
| 2968 | |
| 2969 | MachineFunction &MF = DAG.getMachineFunction(); |
| 2970 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 2971 | AFI->setReturnRegsCount(RVLocs.size()); |
| 2972 | |
| 2973 | // Report error if cmse entry function returns structure through first ptr arg. |
| 2974 | if (AFI->isCmseNSEntryFunction() && MF.getFunction().hasStructRetAttr()) { |
| 2975 | // Note: using an empty SDLoc(), as the first line of the function is a |
| 2976 | // better place to report than the last line. |
| 2977 | DiagnosticInfoUnsupported Diag( |
| 2978 | DAG.getMachineFunction().getFunction(), |
| 2979 | "secure entry function would return value through pointer" , |
| 2980 | SDLoc().getDebugLoc()); |
| 2981 | DAG.getContext()->diagnose(Diag); |
| 2982 | } |
| 2983 | |
| 2984 | // Copy the result values into the output registers. |
| 2985 | for (unsigned i = 0, realRVLocIdx = 0; |
| 2986 | i != RVLocs.size(); |
| 2987 | ++i, ++realRVLocIdx) { |
| 2988 | CCValAssign &VA = RVLocs[i]; |
| 2989 | assert(VA.isRegLoc() && "Can only return in registers!"); |
| 2990 | |
| 2991 | SDValue Arg = OutVals[realRVLocIdx]; |
| 2992 | bool ReturnF16 = false; |
| 2993 | |
| 2994 | if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) { |
| 2995 | // Half-precision return values can be returned like this: |
| 2996 | // |
| 2997 | // t11: f16 = fadd ... |
| 2998 | // t12: i16 = bitcast t11 |
| 2999 | // t13: i32 = zero_extend t12 |
| 3000 | // t14: f32 = bitcast t13 <~~~~~~~ Arg |
| 3001 | // |
| 3002 | // to avoid code generation for bitcasts, we simply set Arg to the node |
| 3003 | // that produces the f16 value, t11 in this case. |
| 3004 | // |
| 3005 | if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) { |
| 3006 | SDValue ZE = Arg.getOperand(0); |
| 3007 | if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) { |
| 3008 | SDValue BC = ZE.getOperand(0); |
| 3009 | if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) { |
| 3010 | Arg = BC.getOperand(0); |
| 3011 | ReturnF16 = true; |
| 3012 | } |
| 3013 | } |
| 3014 | } |
| 3015 | } |
| 3016 | |
| 3017 | switch (VA.getLocInfo()) { |
| 3018 | default: llvm_unreachable("Unknown loc info!"); |
| 3019 | case CCValAssign::Full: break; |
| 3020 | case CCValAssign::BCvt: |
| 3021 | if (!ReturnF16) |
| 3022 | Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); |
| 3023 | break; |
| 3024 | } |
| 3025 | |
| 3026 | // Mask f16 arguments if this is a CMSE nonsecure entry. |
| 3027 | auto RetVT = Outs[realRVLocIdx].ArgVT; |
| 3028 | if (AFI->isCmseNSEntryFunction() && (RetVT == MVT::f16)) { |
| 3029 | if (VA.needsCustom() && VA.getValVT() == MVT::f16) { |
| 3030 | Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg); |
| 3031 | } else { |
| 3032 | auto LocBits = VA.getLocVT().getSizeInBits(); |
| 3033 | auto MaskValue = APInt::getLowBitsSet(LocBits, RetVT.getSizeInBits()); |
| 3034 | SDValue Mask = |
| 3035 | DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits)); |
| 3036 | Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg); |
| 3037 | Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask); |
| 3038 | Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); |
| 3039 | } |
| 3040 | } |
| 3041 | |
| 3042 | if (VA.needsCustom() && |
| 3043 | (VA.getLocVT() == MVT::v2f64 || VA.getLocVT() == MVT::f64)) { |
| 3044 | if (VA.getLocVT() == MVT::v2f64) { |
| 3045 | // Extract the first half and return it in two registers. |
| 3046 | SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, |
| 3047 | DAG.getConstant(0, dl, MVT::i32)); |
| 3048 | SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 3049 | DAG.getVTList(MVT::i32, MVT::i32), Half); |
| 3050 | |
| 3051 | Chain = |
| 3052 | DAG.getCopyToReg(Chain, dl, VA.getLocReg(), |
| 3053 | HalfGPRs.getValue(isLittleEndian ? 0 : 1), Flag); |
| 3054 | Flag = Chain.getValue(1); |
| 3055 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); |
| 3056 | VA = RVLocs[++i]; // skip ahead to next loc |
| 3057 | Chain = |
| 3058 | DAG.getCopyToReg(Chain, dl, VA.getLocReg(), |
| 3059 | HalfGPRs.getValue(isLittleEndian ? 1 : 0), Flag); |
| 3060 | Flag = Chain.getValue(1); |
| 3061 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); |
| 3062 | VA = RVLocs[++i]; // skip ahead to next loc |
| 3063 | |
| 3064 | // Extract the 2nd half and fall through to handle it as an f64 value. |
| 3065 | Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, |
| 3066 | DAG.getConstant(1, dl, MVT::i32)); |
| 3067 | } |
| 3068 | // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is |
| 3069 | // available. |
| 3070 | SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 3071 | DAG.getVTList(MVT::i32, MVT::i32), Arg); |
| 3072 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), |
| 3073 | fmrrd.getValue(isLittleEndian ? 0 : 1), Flag); |
| 3074 | Flag = Chain.getValue(1); |
| 3075 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); |
| 3076 | VA = RVLocs[++i]; // skip ahead to next loc |
| 3077 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), |
| 3078 | fmrrd.getValue(isLittleEndian ? 1 : 0), Flag); |
| 3079 | } else |
| 3080 | Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); |
| 3081 | |
| 3082 | // Guarantee that all emitted copies are glued together, |
| 3083 | // so nothing can be scheduled in between them. |
| 3084 | Flag = Chain.getValue(1); |
| 3085 | RetOps.push_back(DAG.getRegister( |
| 3086 | VA.getLocReg(), ReturnF16 ? Arg.getValueType() : VA.getLocVT())); |
| 3087 | } |
| 3088 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 3089 | const MCPhysReg *I = |
| 3090 | TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); |
| 3091 | if (I) { |
| 3092 | for (; *I; ++I) { |
| 3093 | if (ARM::GPRRegClass.contains(*I)) |
| 3094 | RetOps.push_back(DAG.getRegister(*I, MVT::i32)); |
| 3095 | else if (ARM::DPRRegClass.contains(*I)) |
| 3096 | RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); |
| 3097 | else |
| 3098 | llvm_unreachable("Unexpected register class in CSRsViaCopy!" ); |
| 3099 | } |
| 3100 | } |
| 3101 | |
| 3102 | // Update chain and glue. |
| 3103 | RetOps[0] = Chain; |
| 3104 | if (Flag.getNode()) |
| 3105 | RetOps.push_back(Flag); |
| 3106 | |
| 3107 | // CPUs which aren't M-class use a special sequence to return from |
| 3108 | // exceptions (roughly, any instruction setting pc and cpsr simultaneously, |
| 3109 | // though we use "subs pc, lr, #N"). |
| 3110 | // |
| 3111 | // M-class CPUs actually use a normal return sequence with a special |
| 3112 | // (hardware-provided) value in LR, so the normal code path works. |
| 3113 | if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") && |
| 3114 | !Subtarget->isMClass()) { |
| 3115 | if (Subtarget->isThumb1Only()) |
| 3116 | report_fatal_error("interrupt attribute is not supported in Thumb1" ); |
| 3117 | return LowerInterruptReturn(RetOps, dl, DAG); |
| 3118 | } |
| 3119 | |
| 3120 | ARMISD::NodeType RetNode = AFI->isCmseNSEntryFunction() ? ARMISD::SERET_FLAG : |
| 3121 | ARMISD::RET_FLAG; |
| 3122 | return DAG.getNode(RetNode, dl, MVT::Other, RetOps); |
| 3123 | } |
| 3124 | |
| 3125 | bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { |
| 3126 | if (N->getNumValues() != 1) |
| 3127 | return false; |
| 3128 | if (!N->hasNUsesOfValue(1, 0)) |
| 3129 | return false; |
| 3130 | |
| 3131 | SDValue TCChain = Chain; |
| 3132 | SDNode *Copy = *N->use_begin(); |
| 3133 | if (Copy->getOpcode() == ISD::CopyToReg) { |
| 3134 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
| 3135 | // perform a tail call. |
| 3136 | if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
| 3137 | return false; |
| 3138 | TCChain = Copy->getOperand(0); |
| 3139 | } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { |
| 3140 | SDNode *VMov = Copy; |
| 3141 | // f64 returned in a pair of GPRs. |
| 3142 | SmallPtrSet<SDNode*, 2> Copies; |
| 3143 | for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); |
| 3144 | UI != UE; ++UI) { |
| 3145 | if (UI->getOpcode() != ISD::CopyToReg) |
| 3146 | return false; |
| 3147 | Copies.insert(*UI); |
| 3148 | } |
| 3149 | if (Copies.size() > 2) |
| 3150 | return false; |
| 3151 | |
| 3152 | for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); |
| 3153 | UI != UE; ++UI) { |
| 3154 | SDValue UseChain = UI->getOperand(0); |
| 3155 | if (Copies.count(UseChain.getNode())) |
| 3156 | // Second CopyToReg |
| 3157 | Copy = *UI; |
| 3158 | else { |
| 3159 | // We are at the top of this chain. |
| 3160 | // If the copy has a glue operand, we conservatively assume it |
| 3161 | // isn't safe to perform a tail call. |
| 3162 | if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue) |
| 3163 | return false; |
| 3164 | // First CopyToReg |
| 3165 | TCChain = UseChain; |
| 3166 | } |
| 3167 | } |
| 3168 | } else if (Copy->getOpcode() == ISD::BITCAST) { |
| 3169 | // f32 returned in a single GPR. |
| 3170 | if (!Copy->hasOneUse()) |
| 3171 | return false; |
| 3172 | Copy = *Copy->use_begin(); |
| 3173 | if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0)) |
| 3174 | return false; |
| 3175 | // If the copy has a glue operand, we conservatively assume it isn't safe to |
| 3176 | // perform a tail call. |
| 3177 | if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) |
| 3178 | return false; |
| 3179 | TCChain = Copy->getOperand(0); |
| 3180 | } else { |
| 3181 | return false; |
| 3182 | } |
| 3183 | |
| 3184 | bool HasRet = false; |
| 3185 | for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); |
| 3186 | UI != UE; ++UI) { |
| 3187 | if (UI->getOpcode() != ARMISD::RET_FLAG && |
| 3188 | UI->getOpcode() != ARMISD::INTRET_FLAG) |
| 3189 | return false; |
| 3190 | HasRet = true; |
| 3191 | } |
| 3192 | |
| 3193 | if (!HasRet) |
| 3194 | return false; |
| 3195 | |
| 3196 | Chain = TCChain; |
| 3197 | return true; |
| 3198 | } |
| 3199 | |
| 3200 | bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { |
| 3201 | if (!Subtarget->supportsTailCall()) |
| 3202 | return false; |
| 3203 | |
| 3204 | if (!CI->isTailCall()) |
| 3205 | return false; |
| 3206 | |
| 3207 | return true; |
| 3208 | } |
| 3209 | |
| 3210 | // Writing a 64-bit value requires splitting it into two 32-bit values first, |
| 3211 | // and passing the low and high parts through. |
| 3212 | static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) { |
| 3213 | SDLoc DL(Op); |
| 3214 | SDValue WriteValue = Op->getOperand(2); |
| 3215 | |
| 3216 | // This function is only supposed to be called for i64 type argument. |
| 3217 | assert(WriteValue.getValueType() == MVT::i64 |
| 3218 | && "LowerWRITE_REGISTER called for non-i64 type argument." ); |
| 3219 | |
| 3220 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, |
| 3221 | DAG.getConstant(0, DL, MVT::i32)); |
| 3222 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, |
| 3223 | DAG.getConstant(1, DL, MVT::i32)); |
| 3224 | SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi }; |
| 3225 | return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops); |
| 3226 | } |
| 3227 | |
| 3228 | // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as |
| 3229 | // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is |
| 3230 | // one of the above mentioned nodes. It has to be wrapped because otherwise |
| 3231 | // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only |
| 3232 | // be used to form addressing mode. These wrapped nodes will be selected |
| 3233 | // into MOVi. |
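| | // For example, (ARMISD::Wrapper (TargetConstantPool ...)) can then be |
| | // matched as a single constant-pool load or mov-immediate, depending on |
| | // the subtarget. |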
| 3234 | SDValue ARMTargetLowering::LowerConstantPool(SDValue Op, |
| 3235 | SelectionDAG &DAG) const { |
| 3236 | EVT PtrVT = Op.getValueType(); |
| 3237 | // FIXME there is no actual debug info here |
| 3238 | SDLoc dl(Op); |
| 3239 | ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); |
| 3240 | SDValue Res; |
| 3241 | |
| 3242 | // When generating execute-only code Constant Pools must be promoted to the |
| 3243 | // global data section. It's a bit ugly that we can't share them across basic |
| 3244 | // blocks, but this way we guarantee that execute-only behaves correctly with |
| 3245 | // position-independent addressing modes. |
| 3246 | if (Subtarget->genExecuteOnly()) { |
| 3247 | auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); |
| 3248 | auto T = const_cast<Type*>(CP->getType()); |
| 3249 | auto C = const_cast<Constant*>(CP->getConstVal()); |
| 3250 | auto M = const_cast<Module*>(DAG.getMachineFunction(). |
| 3251 | getFunction().getParent()); |
| 3252 | auto GV = new GlobalVariable( |
| 3253 | *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C, |
| 3254 | Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + |
| 3255 | Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" + |
| 3256 | Twine(AFI->createPICLabelUId()) |
| 3257 | ); |
| 3258 | SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV), |
| 3259 | dl, PtrVT); |
| 3260 | return LowerGlobalAddress(GA, DAG); |
| 3261 | } |
| 3262 | |
| 3263 | if (CP->isMachineConstantPoolEntry()) |
| 3264 | Res = |
| 3265 | DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CP->getAlign()); |
| 3266 | else |
| 3267 | Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign()); |
| 3268 | return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); |
| 3269 | } |
| 3270 | |
| 3271 | unsigned ARMTargetLowering::getJumpTableEncoding() const { |
| 3272 | return MachineJumpTableInfo::EK_Inline; |
| 3273 | } |
| 3274 | |
| 3275 | SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, |
| 3276 | SelectionDAG &DAG) const { |
| 3277 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3278 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3279 | unsigned ARMPCLabelIndex = 0; |
| 3280 | SDLoc DL(Op); |
| 3281 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3282 | const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); |
| 3283 | SDValue CPAddr; |
| 3284 | bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI(); |
| 3285 | if (!IsPositionIndependent) { |
| 3286 | CPAddr = DAG.getTargetConstantPool(BA, PtrVT, Align(4)); |
| 3287 | } else { |
| 3288 | unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 3289 | ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3290 | ARMConstantPoolValue *CPV = |
| 3291 | ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, |
| 3292 | ARMCP::CPBlockAddress, PCAdj); |
| 3293 | CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); |
| 3294 | } |
| 3295 | CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); |
| 3296 | SDValue Result = DAG.getLoad( |
| 3297 | PtrVT, DL, DAG.getEntryNode(), CPAddr, |
| 3298 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3299 | if (!IsPositionIndependent) |
| 3300 | return Result; |
| 3301 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32); |
| 3302 | return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); |
| 3303 | } |
| 3304 | |
| 3305 | /// Convert a TLS address reference into the correct sequence of loads |
| 3306 | /// and calls to compute the variable's address for Darwin, and return an |
| 3307 | /// SDValue containing the final node. |
| 3308 | |
| 3309 | /// Darwin only has one TLS scheme which must be capable of dealing with the |
| 3310 | /// fully general situation, in the worst case. This means: |
| 3311 | /// + "extern __thread" declaration. |
| 3312 | /// + Defined in a possibly unknown dynamic library. |
| 3313 | /// |
| 3314 | /// The general system is that each __thread variable has a [3 x i32] descriptor |
| 3315 | /// which contains information used by the runtime to calculate the address. The |
| 3316 | /// only part of this the compiler needs to know about is the first word, which |
| 3317 | /// contains a function pointer that must be called with the address of the |
| 3318 | /// entire descriptor in "r0". |
| 3319 | /// |
| 3320 | /// Since this descriptor may be in a different unit, in general access must |
| 3321 | /// proceed along the usual ARM rules. A common sequence to produce is: |
| 3322 | /// |
| 3323 | /// movw rT1, :lower16:_var$non_lazy_ptr |
| 3324 | /// movt rT1, :upper16:_var$non_lazy_ptr |
| 3325 | /// ldr r0, [rT1] |
| 3326 | /// ldr rT2, [r0] |
| 3327 | /// blx rT2 |
| 3328 | /// [...address now in r0...] |
| 3329 | SDValue |
| 3330 | ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op, |
| 3331 | SelectionDAG &DAG) const { |
| 3332 | assert(Subtarget->isTargetDarwin() && |
| 3333 | "This function expects a Darwin target" ); |
| 3334 | SDLoc DL(Op); |
| 3335 | |
| 3336 | // First step is to get the address of the actual global symbol. This is where |
| 3337 | // the TLS descriptor lives. |
| 3338 | SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG); |
| 3339 | |
| 3340 | // The first entry in the descriptor is a function pointer that we must call |
| 3341 | // to obtain the address of the variable. |
| 3342 | SDValue Chain = DAG.getEntryNode(); |
| 3343 | SDValue FuncTLVGet = DAG.getLoad( |
| 3344 | MVT::i32, DL, Chain, DescAddr, |
| 3345 | MachinePointerInfo::getGOT(DAG.getMachineFunction()), Align(4), |
| 3346 | MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable | |
| 3347 | MachineMemOperand::MOInvariant); |
| 3348 | Chain = FuncTLVGet.getValue(1); |
| 3349 | |
| 3350 | MachineFunction &F = DAG.getMachineFunction(); |
| 3351 | MachineFrameInfo &MFI = F.getFrameInfo(); |
| 3352 | MFI.setAdjustsStack(true); |
| 3353 | |
| 3354 | // TLS calls preserve all registers except those that absolutely must be |
| 3355 | // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be |
| 3356 | // silly). |
| 3357 | auto TRI = |
| 3358 | getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo(); |
| 3359 | auto ARI = static_cast<const ARMRegisterInfo *>(TRI); |
| 3360 | const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction()); |
| 3361 | |
| 3362 | // Finally, we can make the call. This is just a degenerate version of a |
| 3363 | // normal ARM call node: r0 takes the address of the descriptor, and |
| 3364 | // returns the address of the variable in this thread. |
| 3365 | Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue()); |
| 3366 | Chain = |
| 3367 | DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue), |
| 3368 | Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32), |
| 3369 | DAG.getRegisterMask(Mask), Chain.getValue(1)); |
| 3370 | return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1)); |
| 3371 | } |
| 3372 | |
| 3373 | SDValue |
| 3374 | ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op, |
| 3375 | SelectionDAG &DAG) const { |
| 3376 | assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering"); |
| 3377 | |
| 3378 | SDValue Chain = DAG.getEntryNode(); |
| 3379 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3380 | SDLoc DL(Op); |
| 3381 | |
| 3382 | // Load the current TEB (thread environment block) |
| 3383 | SDValue Ops[] = {Chain, |
| 3384 | DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32), |
| 3385 | DAG.getTargetConstant(15, DL, MVT::i32), |
| 3386 | DAG.getTargetConstant(0, DL, MVT::i32), |
| 3387 | DAG.getTargetConstant(13, DL, MVT::i32), |
| 3388 | DAG.getTargetConstant(0, DL, MVT::i32), |
| 3389 | DAG.getTargetConstant(2, DL, MVT::i32)}; |
| 3390 | SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, |
| 3391 | DAG.getVTList(MVT::i32, MVT::Other), Ops); |
| 3392 | |
| 3393 | SDValue TEB = CurrentTEB.getValue(0); |
| 3394 | Chain = CurrentTEB.getValue(1); |
| 3395 | |
| 3396 | // Load the ThreadLocalStoragePointer from the TEB |
| 3397 | // A pointer to the TLS array is located at offset 0x2c from the TEB. |
| 3398 | SDValue TLSArray = |
| 3399 | DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL)); |
| 3400 | TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo()); |
| 3401 | |
| 3402 | // The pointer to the thread's TLS data area is found at an offset of the |
| 3403 | // TLS index scaled by 4 into the TLSArray. |
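| | // In effect, the address computed below is: |
| | //   slot = *(*(TEB + 0x2c) + 4 * _tls_index) |
| | //   addr = slot + (SECREL offset of the variable) |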
| 3404 | |
| 3405 | // Load the TLS index from the C runtime |
| 3406 | SDValue TLSIndex = |
| 3407 | DAG.getTargetExternalSymbol("_tls_index" , PtrVT, ARMII::MO_NO_FLAG); |
| 3408 | TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex); |
| 3409 | TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo()); |
| 3410 | |
| 3411 | SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex, |
| 3412 | DAG.getConstant(2, DL, MVT::i32)); |
| 3413 | SDValue TLS = DAG.getLoad(PtrVT, DL, Chain, |
| 3414 | DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot), |
| 3415 | MachinePointerInfo()); |
| 3416 | |
| 3417 | // Get the offset of the start of the .tls section (section base) |
| 3418 | const auto *GA = cast<GlobalAddressSDNode>(Op); |
| 3419 | auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL); |
| 3420 | SDValue Offset = DAG.getLoad( |
| 3421 | PtrVT, DL, Chain, |
| 3422 | DAG.getNode(ARMISD::Wrapper, DL, MVT::i32, |
| 3423 | DAG.getTargetConstantPool(CPV, PtrVT, Align(4))), |
| 3424 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3425 | |
| 3426 | return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset); |
| 3427 | } |
| 3428 | |
| 3429 | // Lower ISD::GlobalTLSAddress using the "general dynamic" model |
| 3430 | SDValue |
| 3431 | ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, |
| 3432 | SelectionDAG &DAG) const { |
| 3433 | SDLoc dl(GA); |
| 3434 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3435 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 3436 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3437 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3438 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3439 | ARMConstantPoolValue *CPV = |
| 3440 | ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, |
| 3441 | ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); |
| 3442 | SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); |
| 3443 | Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); |
| 3444 | Argument = DAG.getLoad( |
| 3445 | PtrVT, dl, DAG.getEntryNode(), Argument, |
| 3446 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3447 | SDValue Chain = Argument.getValue(1); |
| 3448 | |
| 3449 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); |
| 3450 | Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); |
| 3451 | |
| 3452 | // call __tls_get_addr. |
| 3453 | ArgListTy Args; |
| 3454 | ArgListEntry Entry; |
| 3455 | Entry.Node = Argument; |
| 3456 | Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); |
| 3457 | Args.push_back(Entry); |
| 3458 | |
| 3459 | // FIXME: is there useful debug info available here? |
| 3460 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 3461 | CLI.setDebugLoc(dl).setChain(Chain).setLibCallee( |
| 3462 | CallingConv::C, Type::getInt32Ty(*DAG.getContext()), |
| 3463 | DAG.getExternalSymbol("__tls_get_addr" , PtrVT), std::move(Args)); |
| 3464 | |
| 3465 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
| 3466 | return CallResult.first; |
| 3467 | } |
| 3468 | |
| 3469 | // Lower ISD::GlobalTLSAddress using the "initial exec" or |
| 3470 | // "local exec" model. |
| 3471 | SDValue |
| 3472 | ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, |
| 3473 | SelectionDAG &DAG, |
| 3474 | TLSModel::Model model) const { |
| 3475 | const GlobalValue *GV = GA->getGlobal(); |
| 3476 | SDLoc dl(GA); |
| 3477 | SDValue Offset; |
| 3478 | SDValue Chain = DAG.getEntryNode(); |
| 3479 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3480 | // Get the Thread Pointer |
| 3481 | SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); |
| 3482 | |
| 3483 | if (model == TLSModel::InitialExec) { |
| 3484 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3485 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3486 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3487 | // Initial exec model. |
| 3488 | unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; |
| 3489 | ARMConstantPoolValue *CPV = |
| 3490 | ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, |
| 3491 | ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, |
| 3492 | true); |
| 3493 | Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); |
| 3494 | Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); |
| 3495 | Offset = DAG.getLoad( |
| 3496 | PtrVT, dl, Chain, Offset, |
| 3497 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3498 | Chain = Offset.getValue(1); |
| 3499 | |
| 3500 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); |
| 3501 | Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); |
| 3502 | |
| 3503 | Offset = DAG.getLoad( |
| 3504 | PtrVT, dl, Chain, Offset, |
| 3505 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3506 | } else { |
| 3507 | // local exec model |
| 3508 | assert(model == TLSModel::LocalExec); |
| 3509 | ARMConstantPoolValue *CPV = |
| 3510 | ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); |
| 3511 | Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); |
| 3512 | Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); |
| 3513 | Offset = DAG.getLoad( |
| 3514 | PtrVT, dl, Chain, Offset, |
| 3515 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3516 | } |
| 3517 | |
| 3518 | // The address of the thread-local variable is the thread pointer plus the |
| 3519 | // variable's offset. |
| 3520 | return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); |
| 3521 | } |
| 3522 | |
| 3523 | SDValue |
| 3524 | ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { |
| 3525 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); |
| 3526 | if (DAG.getTarget().useEmulatedTLS()) |
| 3527 | return LowerToTLSEmulatedModel(GA, DAG); |
| 3528 | |
| 3529 | if (Subtarget->isTargetDarwin()) |
| 3530 | return LowerGlobalTLSAddressDarwin(Op, DAG); |
| 3531 | |
| 3532 | if (Subtarget->isTargetWindows()) |
| 3533 | return LowerGlobalTLSAddressWindows(Op, DAG); |
| 3534 | |
| 3535 | // TODO: implement the "local dynamic" model |
| 3536 | assert(Subtarget->isTargetELF() && "Only ELF implemented here"); |
| 3537 | TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal()); |
| 3538 | |
| 3539 | switch (model) { |
| 3540 | case TLSModel::GeneralDynamic: |
| 3541 | case TLSModel::LocalDynamic: |
| 3542 | return LowerToTLSGeneralDynamicModel(GA, DAG); |
| 3543 | case TLSModel::InitialExec: |
| 3544 | case TLSModel::LocalExec: |
| 3545 | return LowerToTLSExecModels(GA, DAG, model); |
| 3546 | } |
| 3547 | llvm_unreachable("bogus TLS model" ); |
| 3548 | } |
| 3549 | |
| 3550 | /// Return true if all users of V are within function F, looking through |
| 3551 | /// ConstantExprs. |
| 3552 | static bool allUsersAreInFunction(const Value *V, const Function *F) { |
| 3553 | SmallVector<const User*,4> Worklist; |
| 3554 | for (auto *U : V->users()) |
| 3555 | Worklist.push_back(U); |
| 3556 | while (!Worklist.empty()) { |
| 3557 | auto *U = Worklist.pop_back_val(); |
| 3558 | if (isa<ConstantExpr>(U)) { |
| 3559 | append_range(Worklist, U->users()); |
| 3560 | continue; |
| 3561 | } |
| 3562 | |
| 3563 | auto *I = dyn_cast<Instruction>(U); |
| 3564 | if (!I || I->getParent()->getParent() != F) |
| 3565 | return false; |
| 3566 | } |
| 3567 | return true; |
| 3568 | } |
| 3569 | |
| 3570 | static SDValue promoteToConstantPool(const ARMTargetLowering *TLI, |
| 3571 | const GlobalValue *GV, SelectionDAG &DAG, |
| 3572 | EVT PtrVT, const SDLoc &dl) { |
| 3573 | // If we're creating a pool entry for a constant global with unnamed address, |
| 3574 | // and the global is small enough, we can emit it inline into the constant pool |
| 3575 | // to save ourselves an indirection. |
| 3576 | // |
| 3577 | // This is a win if the constant is only used in one function (so it doesn't |
| 3578 | // need to be duplicated) or duplicating the constant wouldn't increase code |
| 3579 | // size (implying the constant is no larger than 4 bytes). |
| 3580 | const Function &F = DAG.getMachineFunction().getFunction(); |
| 3581 | |
| 3582 | // We rely on this decision to inline being idempotent and unrelated to the |
| 3583 | // use-site. We know that if we inline a variable at one use site, we'll |
| 3584 | // inline it elsewhere too (and reuse the constant pool entry). Fast-isel |
| 3585 | // doesn't know about this optimization, so bail out if it's enabled; |
| 3586 | // otherwise we could decide to inline here (and thus never emit the GV) |
| 3587 | // while fast-isel generated code still requires the GV. |
| 3588 | if (!EnableConstpoolPromotion || |
| 3589 | DAG.getMachineFunction().getTarget().Options.EnableFastISel) |
| 3590 | return SDValue(); |
| 3591 | |
| 3592 | auto *GVar = dyn_cast<GlobalVariable>(GV); |
| 3593 | if (!GVar || !GVar->hasInitializer() || |
| 3594 | !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() || |
| 3595 | !GVar->hasLocalLinkage()) |
| 3596 | return SDValue(); |
| 3597 | |
| 3598 | // If we inline a value that contains relocations, we move the relocations |
| 3599 | // from .data to .text. This is not allowed in position-independent code. |
| 3600 | auto *Init = GVar->getInitializer(); |
| 3601 | if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) && |
| 3602 | Init->needsRelocation()) |
| 3603 | return SDValue(); |
| 3604 | |
| 3605 | // The constant islands pass can only really deal with alignment requests |
| 3606 | // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote |
| 3607 | // any type requiring alignment greater than 4 bytes. We also can only |
| 3608 | // promote constants that are multiples of 4 bytes in size, or that can be |
| 3609 | // padded to a multiple of 4. Currently we only try to pad constants that |
| 3610 | // are strings, for simplicity. |
| 3611 | auto *CDAInit = dyn_cast<ConstantDataArray>(Init); |
| 3612 | unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType()); |
| 3613 | Align PrefAlign = DAG.getDataLayout().getPreferredAlign(GVar); |
| 3614 | unsigned RequiredPadding = 4 - (Size % 4); |
| 3615 | bool PaddingPossible = |
| 3616 | RequiredPadding == 4 || (CDAInit && CDAInit->isString()); |
| 3617 | if (!PaddingPossible || PrefAlign > 4 || Size > ConstpoolPromotionMaxSize || |
| 3618 | Size == 0) |
| 3619 | return SDValue(); |
| 3620 | |
| 3621 | unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding); |
| 3622 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3623 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3624 | |
| 3625 | // We can't bloat the constant pool too much, else the ConstantIslands pass |
| 3626 | // may fail to converge. If we haven't promoted this global yet (it may have |
| 3627 | // multiple uses), and promoting it would increase the constant pool size |
| 3628 | // (Size > 4), ensure we have space to do so up to MaxTotal. |
| 3629 | if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4) |
| 3630 | if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >= |
| 3631 | ConstpoolPromotionMaxTotal) |
| 3632 | return SDValue(); |
| 3633 | |
| 3634 | // This is only valid if all users are in a single function; we can't clone |
| 3635 | // the constant in general. The LLVM IR unnamed_addr allows merging |
| 3636 | // constants, but not cloning them. |
| 3637 | // |
| 3638 | // We could potentially allow cloning if we could prove all uses of the |
| 3639 | // constant in the current function don't care about the address, like |
| 3640 | // printf format strings. But that isn't implemented for now. |
| 3641 | if (!allUsersAreInFunction(GVar, &F)) |
| 3642 | return SDValue(); |
| 3643 | |
| 3644 | // We're going to inline this global. Pad it out if needed. |
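| | // RequiredPadding != 4 means the PaddingPossible check above proved that |
| | // CDAInit is a non-null string initializer, so it is safe to use here. |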
| 3645 | if (RequiredPadding != 4) { |
| 3646 | StringRef S = CDAInit->getAsString(); |
| 3647 | |
| 3648 | SmallVector<uint8_t,16> V(S.size()); |
| 3649 | std::copy(S.bytes_begin(), S.bytes_end(), V.begin()); |
| 3650 | while (RequiredPadding--) |
| 3651 | V.push_back(0); |
| 3652 | Init = ConstantDataArray::get(*DAG.getContext(), V); |
| 3653 | } |
| 3654 | |
| 3655 | auto CPVal = ARMConstantPoolConstant::Create(GVar, Init); |
| 3656 | SDValue CPAddr = DAG.getTargetConstantPool(CPVal, PtrVT, Align(4)); |
| 3657 | if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) { |
| 3658 | AFI->markGlobalAsPromotedToConstantPool(GVar); |
| 3659 | AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() + |
| 3660 | PaddedSize - 4); |
| 3661 | } |
| 3662 | ++NumConstpoolPromoted; |
| 3663 | return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 3664 | } |
| 3665 | |
| 3666 | bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const { |
| 3667 | if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) |
| 3668 | if (!(GV = GA->getBaseObject())) |
| 3669 | return false; |
| 3670 | if (const auto *V = dyn_cast<GlobalVariable>(GV)) |
| 3671 | return V->isConstant(); |
| 3672 | return isa<Function>(GV); |
| 3673 | } |
| 3674 | |
| 3675 | SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op, |
| 3676 | SelectionDAG &DAG) const { |
| 3677 | switch (Subtarget->getTargetTriple().getObjectFormat()) { |
| 3678 | default: llvm_unreachable("unknown object format" ); |
| 3679 | case Triple::COFF: |
| 3680 | return LowerGlobalAddressWindows(Op, DAG); |
| 3681 | case Triple::ELF: |
| 3682 | return LowerGlobalAddressELF(Op, DAG); |
| 3683 | case Triple::MachO: |
| 3684 | return LowerGlobalAddressDarwin(Op, DAG); |
| 3685 | } |
| 3686 | } |
| 3687 | |
| 3688 | SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, |
| 3689 | SelectionDAG &DAG) const { |
| 3690 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3691 | SDLoc dl(Op); |
| 3692 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); |
| 3693 | const TargetMachine &TM = getTargetMachine(); |
| 3694 | bool IsRO = isReadOnly(GV); |
| 3695 | |
| 3696 | // Promote to a constant pool entry only if we aren't generating an execute-only (XO) text section; XO sections cannot contain data. |
| 3697 | if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly()) |
| 3698 | if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl)) |
| 3699 | return V; |
| 3700 | |
| 3701 | if (isPositionIndependent()) { |
| 3702 | bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV); |
| 3703 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, |
| 3704 | UseGOT_PREL ? ARMII::MO_GOT : 0); |
| 3705 | SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G); |
| 3706 | if (UseGOT_PREL) |
| 3707 | Result = |
| 3708 | DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, |
| 3709 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
| 3710 | return Result; |
| 3711 | } else if (Subtarget->isROPI() && IsRO) { |
| 3712 | // PC-relative. |
| 3713 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT); |
| 3714 | SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G); |
| 3715 | return Result; |
| 3716 | } else if (Subtarget->isRWPI() && !IsRO) { |
| 3717 | // SB-relative: writable data is addressed relative to the static base register (R9). |
| 3718 | SDValue RelAddr; |
| 3719 | if (Subtarget->useMovt()) { |
| 3720 | ++NumMovwMovt; |
| 3721 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL); |
| 3722 | RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G); |
| 3723 | } else { // use literal pool for address constant |
| 3724 | ARMConstantPoolValue *CPV = |
| 3725 | ARMConstantPoolConstant::Create(GV, ARMCP::SBREL); |
| 3726 | SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); |
| 3727 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 3728 | RelAddr = DAG.getLoad( |
| 3729 | PtrVT, dl, DAG.getEntryNode(), CPAddr, |
| 3730 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3731 | } |
| 3732 | SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT); |
| 3733 | SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr); |
| 3734 | return Result; |
| 3735 | } |
| 3736 | |
| 3737 | // If the target supports movw/movt, we can materialize the address directly |
| 3738 | // via a movw/movt pair. This is always cheaper than a constant pool load. |
| 3739 | if (Subtarget->useMovt()) { |
| 3740 | ++NumMovwMovt; |
| 3741 | // FIXME: Once remat is capable of dealing with instructions with register |
| 3742 | // operands, expand this into two nodes. |
| 3743 | return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, |
| 3744 | DAG.getTargetGlobalAddress(GV, dl, PtrVT)); |
| 3745 | } else { |
| 3746 | SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, Align(4)); |
| 3747 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 3748 | return DAG.getLoad( |
| 3749 | PtrVT, dl, DAG.getEntryNode(), CPAddr, |
| 3750 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3751 | } |
| 3752 | } |
| 3753 | |
| 3754 | SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, |
| 3755 | SelectionDAG &DAG) const { |
| 3756 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
| 3757 | "ROPI/RWPI not currently supported for Darwin" ); |
| 3758 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3759 | SDLoc dl(Op); |
| 3760 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); |
| 3761 | |
| 3762 | if (Subtarget->useMovt()) |
| 3763 | ++NumMovwMovt; |
| 3764 | |
| 3765 | // FIXME: Once remat is capable of dealing with instructions with register |
| 3766 | // operands, expand this into multiple nodes |
| 3767 | unsigned Wrapper = |
| 3768 | isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper; |
| 3769 | |
| 3770 | SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY); |
| 3771 | SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G); |
| 3772 | |
| 3773 | if (Subtarget->isGVIndirectSymbol(GV)) |
| 3774 | Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, |
| 3775 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
| 3776 | return Result; |
| 3777 | } |
| 3778 | |
| 3779 | SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op, |
| 3780 | SelectionDAG &DAG) const { |
| 3781 | assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported"); |
| 3782 | assert(Subtarget->useMovt() && |
| 3783 | "Windows on ARM expects to use movw/movt" ); |
| 3784 | assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && |
| 3785 | "ROPI/RWPI not currently supported for Windows" ); |
| 3786 | |
| 3787 | const TargetMachine &TM = getTargetMachine(); |
| 3788 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); |
| 3789 | ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG; |
| 3790 | if (GV->hasDLLImportStorageClass()) |
| 3791 | TargetFlags = ARMII::MO_DLLIMPORT; |
| 3792 | else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV)) |
| 3793 | TargetFlags = ARMII::MO_COFFSTUB; |
| 3794 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3795 | SDValue Result; |
| 3796 | SDLoc DL(Op); |
| 3797 | |
| 3798 | ++NumMovwMovt; |
| 3799 | |
| 3800 | // FIXME: Once remat is capable of dealing with instructions with register |
| 3801 | // operands, expand this into two nodes. |
| 3802 | Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, |
| 3803 | DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*offset=*/0, |
| 3804 | TargetFlags)); |
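| | // dllimport'ed globals and non-DSO-local globals are reached indirectly, |
| | // through the import table entry or a local stub respectively, so an |
| | // extra load is needed to get the real address. |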
| 3805 | if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB)) |
| 3806 | Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, |
| 3807 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); |
| 3808 | return Result; |
| 3809 | } |
| 3810 | |
| 3811 | SDValue |
| 3812 | ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { |
| 3813 | SDLoc dl(Op); |
| 3814 | SDValue Val = DAG.getConstant(0, dl, MVT::i32); |
| 3815 | return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, |
| 3816 | DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), |
| 3817 | Op.getOperand(1), Val); |
| 3818 | } |
| 3819 | |
| 3820 | SDValue |
| 3821 | ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { |
| 3822 | SDLoc dl(Op); |
| 3823 | return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), |
| 3824 | Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32)); |
| 3825 | } |
| 3826 | |
| 3827 | SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, |
| 3828 | SelectionDAG &DAG) const { |
| 3829 | SDLoc dl(Op); |
| 3830 | return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other, |
| 3831 | Op.getOperand(0)); |
| 3832 | } |
| 3833 | |
| 3834 | SDValue ARMTargetLowering::LowerINTRINSIC_VOID( |
| 3835 | SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const { |
| 3836 | unsigned IntNo = |
| 3837 | cast<ConstantSDNode>( |
| 3838 | Op.getOperand(Op.getOperand(0).getValueType() == MVT::Other)) |
| 3839 | ->getZExtValue(); |
| 3840 | switch (IntNo) { |
| 3841 | default: |
| 3842 | return SDValue(); // Don't custom lower most intrinsics. |
| 3843 | case Intrinsic::arm_gnu_eabi_mcount: { |
| 3844 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3845 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3846 | SDLoc dl(Op); |
| 3847 | SDValue Chain = Op.getOperand(0); |
| 3848 | // call "\01__gnu_mcount_nc" |
| 3849 | const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo(); |
| 3850 | const uint32_t *Mask = |
| 3851 | ARI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C); |
| 3852 | assert(Mask && "Missing call preserved mask for calling convention" ); |
| 3853 | // Mark LR an implicit live-in. |
| 3854 | unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); |
| 3855 | SDValue ReturnAddress = |
| 3856 | DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, PtrVT); |
| 3857 | constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue}; |
| 3858 | SDValue Callee = |
| 3859 | DAG.getTargetExternalSymbol("\01__gnu_mcount_nc" , PtrVT, 0); |
| 3860 | SDValue RegisterMask = DAG.getRegisterMask(Mask); |
| 3861 | if (Subtarget->isThumb()) |
| 3862 | return SDValue( |
| 3863 | DAG.getMachineNode( |
| 3864 | ARM::tBL_PUSHLR, dl, ResultTys, |
| 3865 | {ReturnAddress, DAG.getTargetConstant(ARMCC::AL, dl, PtrVT), |
| 3866 | DAG.getRegister(0, PtrVT), Callee, RegisterMask, Chain}), |
| 3867 | 0); |
| 3868 | return SDValue( |
| 3869 | DAG.getMachineNode(ARM::BL_PUSHLR, dl, ResultTys, |
| 3870 | {ReturnAddress, Callee, RegisterMask, Chain}), |
| 3871 | 0); |
| 3872 | } |
| 3873 | } |
| 3874 | } |
| 3875 | |
| 3876 | SDValue |
| 3877 | ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, |
| 3878 | const ARMSubtarget *Subtarget) const { |
| 3879 | unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
| 3880 | SDLoc dl(Op); |
| 3881 | switch (IntNo) { |
| 3882 | default: return SDValue(); // Don't custom lower most intrinsics. |
| 3883 | case Intrinsic::thread_pointer: { |
| 3884 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3885 | return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); |
| 3886 | } |
| 3887 | case Intrinsic::arm_cls: { |
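| | // 32-bit ARM has no CLS instruction, so expand: |
| | //   cls(x) == clz((((x >> 31) ^ x) << 1) | 1) |
| | // The xor turns leading sign bits into leading zeros; the shift-and-or |
| | // both discounts the sign-bit position and keeps the ctlz operand nonzero. |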
| 3888 | const SDValue &Operand = Op.getOperand(1); |
| 3889 | const EVT VTy = Op.getValueType(); |
| 3890 | SDValue SRA = |
| 3891 | DAG.getNode(ISD::SRA, dl, VTy, Operand, DAG.getConstant(31, dl, VTy)); |
| 3892 | SDValue XOR = DAG.getNode(ISD::XOR, dl, VTy, SRA, Operand); |
| 3893 | SDValue SHL = |
| 3894 | DAG.getNode(ISD::SHL, dl, VTy, XOR, DAG.getConstant(1, dl, VTy)); |
| 3895 | SDValue OR = |
| 3896 | DAG.getNode(ISD::OR, dl, VTy, SHL, DAG.getConstant(1, dl, VTy)); |
| 3897 | SDValue Result = DAG.getNode(ISD::CTLZ, dl, VTy, OR); |
| 3898 | return Result; |
| 3899 | } |
| 3900 | case Intrinsic::arm_cls64: { |
| 3901 | // cls(x) = if cls(hi(x)) != 31 then cls(hi(x)) |
| 3902 | // else 31 + clz(if hi(x) == 0 then lo(x) else not(lo(x))) |
| 3903 | const SDValue &Operand = Op.getOperand(1); |
| 3904 | const EVT VTy = Op.getValueType(); |
| 3905 | |
| 3906 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand, |
| 3907 | DAG.getConstant(1, dl, VTy)); |
| 3908 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand, |
| 3909 | DAG.getConstant(0, dl, VTy)); |
| 3910 | SDValue Constant0 = DAG.getConstant(0, dl, VTy); |
| 3911 | SDValue Constant1 = DAG.getConstant(1, dl, VTy); |
| 3912 | SDValue Constant31 = DAG.getConstant(31, dl, VTy); |
| 3913 | SDValue SRAHi = DAG.getNode(ISD::SRA, dl, VTy, Hi, Constant31); |
| 3914 | SDValue XORHi = DAG.getNode(ISD::XOR, dl, VTy, SRAHi, Hi); |
| 3915 | SDValue SHLHi = DAG.getNode(ISD::SHL, dl, VTy, XORHi, Constant1); |
| 3916 | SDValue ORHi = DAG.getNode(ISD::OR, dl, VTy, SHLHi, Constant1); |
| 3917 | SDValue CLSHi = DAG.getNode(ISD::CTLZ, dl, VTy, ORHi); |
| 3918 | SDValue CheckLo = |
| 3919 | DAG.getSetCC(dl, MVT::i1, CLSHi, Constant31, ISD::CondCode::SETEQ); |
| 3920 | SDValue HiIsZero = |
| 3921 | DAG.getSetCC(dl, MVT::i1, Hi, Constant0, ISD::CondCode::SETEQ); |
| 3922 | SDValue AdjustedLo = |
| 3923 | DAG.getSelect(dl, VTy, HiIsZero, Lo, DAG.getNOT(dl, Lo, VTy)); |
| 3924 | SDValue CLZAdjustedLo = DAG.getNode(ISD::CTLZ, dl, VTy, AdjustedLo); |
| 3925 | SDValue Result = |
| 3926 | DAG.getSelect(dl, VTy, CheckLo, |
| 3927 | DAG.getNode(ISD::ADD, dl, VTy, CLZAdjustedLo, Constant31), CLSHi); |
| 3928 | return Result; |
| 3929 | } |
| 3930 | case Intrinsic::eh_sjlj_lsda: { |
| 3931 | MachineFunction &MF = DAG.getMachineFunction(); |
| 3932 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 3933 | unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); |
| 3934 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); |
| 3935 | SDValue CPAddr; |
| 3936 | bool IsPositionIndependent = isPositionIndependent(); |
| 3937 | unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0; |
| 3938 | ARMConstantPoolValue *CPV = |
| 3939 | ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex, |
| 3940 | ARMCP::CPLSDA, PCAdj); |
| 3941 | CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4)); |
| 3942 | CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); |
| 3943 | SDValue Result = DAG.getLoad( |
| 3944 | PtrVT, dl, DAG.getEntryNode(), CPAddr, |
| 3945 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); |
| 3946 | |
| 3947 | if (IsPositionIndependent) { |
| 3948 | SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); |
| 3949 | Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); |
| 3950 | } |
| 3951 | return Result; |
| 3952 | } |
| 3953 | case Intrinsic::arm_neon_vabs: |
| 3954 | return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(), |
| 3955 | Op.getOperand(1)); |
| 3956 | case Intrinsic::arm_neon_vmulls: |
| 3957 | case Intrinsic::arm_neon_vmullu: { |
| 3958 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) |
| 3959 | ? ARMISD::VMULLs : ARMISD::VMULLu; |
| 3960 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
| 3961 | Op.getOperand(1), Op.getOperand(2)); |
| 3962 | } |
| 3963 | case Intrinsic::arm_neon_vminnm: |
| 3964 | case Intrinsic::arm_neon_vmaxnm: { |
| 3965 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm) |
| 3966 | ? ISD::FMINNUM : ISD::FMAXNUM; |
| 3967 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
| 3968 | Op.getOperand(1), Op.getOperand(2)); |
| 3969 | } |
| 3970 | case Intrinsic::arm_neon_vminu: |
| 3971 | case Intrinsic::arm_neon_vmaxu: { |
| 3972 | if (Op.getValueType().isFloatingPoint()) |
| 3973 | return SDValue(); |
| 3974 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu) |
| 3975 | ? ISD::UMIN : ISD::UMAX; |
| 3976 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
| 3977 | Op.getOperand(1), Op.getOperand(2)); |
| 3978 | } |
| 3979 | case Intrinsic::arm_neon_vmins: |
| 3980 | case Intrinsic::arm_neon_vmaxs: { |
| 3981 | // v{min,max}s is overloaded between signed integers and floats. |
| 3982 | if (!Op.getValueType().isFloatingPoint()) { |
| 3983 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
| 3984 | ? ISD::SMIN : ISD::SMAX; |
| 3985 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
| 3986 | Op.getOperand(1), Op.getOperand(2)); |
| 3987 | } |
| 3988 | unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) |
| 3989 | ? ISD::FMINIMUM : ISD::FMAXIMUM; |
| 3990 | return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), |
| 3991 | Op.getOperand(1), Op.getOperand(2)); |
| 3992 | } |
| 3993 | case Intrinsic::arm_neon_vtbl1: |
| 3994 | return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(), |
| 3995 | Op.getOperand(1), Op.getOperand(2)); |
| 3996 | case Intrinsic::arm_neon_vtbl2: |
| 3997 | return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(), |
| 3998 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
| 3999 | case Intrinsic::arm_mve_pred_i2v: |
| 4000 | case Intrinsic::arm_mve_pred_v2i: |
| 4001 | return DAG.getNode(ARMISD::PREDICATE_CAST, SDLoc(Op), Op.getValueType(), |
| 4002 | Op.getOperand(1)); |
| 4003 | case Intrinsic::arm_mve_vreinterpretq: |
| 4004 | return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(Op), Op.getValueType(), |
| 4005 | Op.getOperand(1)); |
| 4006 | case Intrinsic::arm_mve_lsll: |
| 4007 | return DAG.getNode(ARMISD::LSLL, SDLoc(Op), Op->getVTList(), |
| 4008 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
| 4009 | case Intrinsic::arm_mve_asrl: |
| 4010 | return DAG.getNode(ARMISD::ASRL, SDLoc(Op), Op->getVTList(), |
| 4011 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
| 4012 | } |
| 4013 | } |
| 4014 | |
| 4015 | static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, |
| 4016 | const ARMSubtarget *Subtarget) { |
| 4017 | SDLoc dl(Op); |
| 4018 | ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2)); |
| 4019 | auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue()); |
| 4020 | if (SSID == SyncScope::SingleThread) |
| 4021 | return Op; |
| 4022 | |
| 4023 | if (!Subtarget->hasDataBarrier()) { |
| 4024 | // Some ARMv6 CPUs can support data barriers with an mcr instruction. |
| 4025 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
| 4026 | // here. |
| 4027 | assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && |
| 4028 | "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!"); |
| 4029 | return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), |
| 4030 | DAG.getConstant(0, dl, MVT::i32)); |
| 4031 | } |
| 4032 | |
| 4033 | ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1)); |
| 4034 | AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue()); |
| 4035 | ARM_MB::MemBOpt Domain = ARM_MB::ISH; |
| 4036 | if (Subtarget->isMClass()) { |
| 4037 | // Only a full system barrier exists in the M-class architectures. |
| 4038 | Domain = ARM_MB::SY; |
| 4039 | } else if (Subtarget->preferISHSTBarriers() && |
| 4040 | Ord == AtomicOrdering::Release) { |
| 4041 | // Swift happens to implement ISHST barriers in a way that's compatible with |
| 4042 | // Release semantics but weaker than ISH so we'd be fools not to use |
| 4043 | // it. Beware: other processors probably don't! |
| 4044 | Domain = ARM_MB::ISHST; |
| 4045 | } |
| 4046 | |
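| | // Emit the barrier as a dmb intrinsic with the domain picked above. |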
| 4047 | return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0), |
| 4048 | DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32), |
| 4049 | DAG.getConstant(Domain, dl, MVT::i32)); |
| 4050 | } |
| 4051 | |
| 4052 | static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, |
| 4053 | const ARMSubtarget *Subtarget) { |
| 4054 | // Pre-v5TE ARM and Thumb1 do not have preload instructions. |
| 4055 | if (!(Subtarget->isThumb2() || |
| 4056 | (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) |
| 4057 | // Just preserve the chain. |
| 4058 | return Op.getOperand(0); |
| 4059 | |
| 4060 | SDLoc dl(Op); |
| 4061 | unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; |
| 4062 | if (!isRead && |
| 4063 | (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) |
| 4064 | // ARMv7 with MP extension has PLDW. |
| 4065 | return Op.getOperand(0); |
| 4066 | |
| 4067 | unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); |
| 4068 | if (Subtarget->isThumb()) { |
| 4069 | // Invert the bits. |
| 4070 | isRead = ~isRead & 1; |
| 4071 | isData = ~isData & 1; |
| 4072 | } |
| 4073 | |
| 4074 | return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), |
| 4075 | Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32), |
| 4076 | DAG.getConstant(isData, dl, MVT::i32)); |
| 4077 | } |
| 4078 | |
| 4079 | static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { |
| 4080 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4081 | ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); |
| 4082 | |
| 4083 | // vastart just stores the address of the VarArgsFrameIndex slot into the |
| 4084 | // memory location argument. |
| 4085 | SDLoc dl(Op); |
| 4086 | EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); |
| 4087 | SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); |
| 4088 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); |
| 4089 | return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), |
| 4090 | MachinePointerInfo(SV)); |
| 4091 | } |
| 4092 | |
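| | /// Reassemble an f64 formal argument that the calling convention split into |
| | /// two 32-bit halves: the first half is always in a GPR, the second is in |
| | /// the next GPR or in a stack slot. The halves are recombined with VMOVDRR |
| | /// (swapped first on big-endian targets). |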
| 4093 | SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, |
| 4094 | CCValAssign &NextVA, |
| 4095 | SDValue &Root, |
| 4096 | SelectionDAG &DAG, |
| 4097 | const SDLoc &dl) const { |
| 4098 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4099 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4100 | |
| 4101 | const TargetRegisterClass *RC; |
| 4102 | if (AFI->isThumb1OnlyFunction()) |
| 4103 | RC = &ARM::tGPRRegClass; |
| 4104 | else |
| 4105 | RC = &ARM::GPRRegClass; |
| 4106 | |
| 4107 | // Transform the arguments stored in physical registers into virtual ones. |
| 4108 | unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); |
| 4109 | SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); |
| 4110 | |
| 4111 | SDValue ArgValue2; |
| 4112 | if (NextVA.isMemLoc()) { |
| 4113 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 4114 | int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true); |
| 4115 | |
| 4116 | // Create load node to retrieve arguments from the stack. |
| 4117 | SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); |
| 4118 | ArgValue2 = DAG.getLoad( |
| 4119 | MVT::i32, dl, Root, FIN, |
| 4120 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); |
| 4121 | } else { |
| 4122 | Reg = MF.addLiveIn(NextVA.getLocReg(), RC); |
| 4123 | ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); |
| 4124 | } |
| 4125 | if (!Subtarget->isLittle()) |
| 4126 | std::swap(ArgValue, ArgValue2); |
| 4127 | return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); |
| 4128 | } |
| 4129 | |
| 4130 | // The remaining GPRs hold either the beginning of variable-argument |
| 4131 | // data, or the beginning of an aggregate passed by value (usually |
| 4132 | // byval). Either way, we allocate stack slots adjacent to the data |
| 4133 | // provided by our caller, and store the unallocated registers there. |
| 4134 | // If this is a variadic function, the va_list pointer will begin with |
| 4135 | // these values; otherwise, this reassembles a (byval) structure that |
| 4136 | // was split between registers and memory. |
| 4137 | // Return: The frame index registers were stored into. |
| 4138 | int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, |
| 4139 | const SDLoc &dl, SDValue &Chain, |
| 4140 | const Value *OrigArg, |
| 4141 | unsigned InRegsParamRecordIdx, |
| 4142 | int ArgOffset, unsigned ArgSize) const { |
| 4143 | // Currently, two use cases are possible: |
| 4144 | // Case #1. Non-var-args function, and we meet the first byval parameter. |
| 4145 | //          Set up the first unallocated register as the first byval |
| 4146 | //          register; consume all remaining registers |
| 4147 | //          (these two actions are performed by the HandleByVal method). |
| 4148 | //          Then, here, we initialize the stack frame with |
| 4149 | //          "store-reg" instructions. |
| 4150 | // Case #2. Var-args function that doesn't contain byval parameters. |
| 4151 | //          The same: consume all remaining unallocated registers and |
| 4152 | //          initialize the stack frame. |
| 4153 | |
| 4154 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4155 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 4156 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4157 | unsigned RBegin, REnd; |
| 4158 | if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) { |
| 4159 | CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd); |
| 4160 | } else { |
| 4161 | unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs); |
| 4162 | RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx]; |
| 4163 | REnd = ARM::R4; |
| 4164 | } |
| 4165 | |
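| | // The register save area sits immediately below the CFA (the incoming |
| | // stack pointer), so address it with a negative offset: 4 bytes for every |
| | // register from RBegin up to (but not including) R4. |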
| 4166 | if (REnd != RBegin) |
| 4167 | ArgOffset = -4 * (ARM::R4 - RBegin); |
| 4168 | |
| 4169 | auto PtrVT = getPointerTy(DAG.getDataLayout()); |
| 4170 | int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false); |
| 4171 | SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT); |
| 4172 | |
| 4173 | SmallVector<SDValue, 4> MemOps; |
| 4174 | const TargetRegisterClass *RC = |
| 4175 | AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
| 4176 | |
| 4177 | for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) { |
| 4178 | unsigned VReg = MF.addLiveIn(Reg, RC); |
| 4179 | SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); |
| 4180 | SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, |
| 4181 | MachinePointerInfo(OrigArg, 4 * i)); |
| 4182 | MemOps.push_back(Store); |
| 4183 | FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT)); |
| 4184 | } |
| 4185 | |
| 4186 | if (!MemOps.empty()) |
| 4187 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); |
| 4188 | return FrameIndex; |
| 4189 | } |
| 4190 | |
| 4191 | // Set up the stack frame that the va_list pointer will start from. |
| 4192 | void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, |
| 4193 | const SDLoc &dl, SDValue &Chain, |
| 4194 | unsigned ArgOffset, |
| 4195 | unsigned TotalArgRegsSaveSize, |
| 4196 | bool ForceMutable) const { |
| 4197 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4198 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4199 | |
| 4200 | // Try to store any remaining integer argument regs |
| 4201 | // to their spots on the stack so that they may be loaded by dereferencing |
| 4202 | // the result of va_next. |
| 4203 | // If there are no regs to be stored, just point the address past the last |
| 4204 | // argument passed via the stack. |
| 4205 | int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr, |
| 4206 | CCInfo.getInRegsParamsCount(), |
| 4207 | CCInfo.getNextStackOffset(), |
| 4208 | std::max(4U, TotalArgRegsSaveSize)); |
| 4209 | AFI->setVarArgsFrameIndex(FrameIndex); |
| 4210 | } |
| 4211 | |
| 4212 | bool ARMTargetLowering::splitValueIntoRegisterParts( |
| 4213 | SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, |
| 4214 | unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const { |
| 4215 | bool IsABIRegCopy = CC.hasValue(); |
| 4216 | EVT ValueVT = Val.getValueType(); |
| 4217 | if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) && |
| 4218 | PartVT == MVT::f32) { |
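| | // An f16/bf16 value assigned to a single f32 register travels in the low |
| | // 16 bits of that register: bitcast to i16, any-extend to i32, then |
| | // bitcast the integer to f32. |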
| 4219 | unsigned ValueBits = ValueVT.getSizeInBits(); |
| 4220 | unsigned PartBits = PartVT.getSizeInBits(); |
| 4221 | Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(ValueBits), Val); |
| 4222 | Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::getIntegerVT(PartBits), Val); |
| 4223 | Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val); |
| 4224 | Parts[0] = Val; |
| 4225 | return true; |
| 4226 | } |
| 4227 | return false; |
| 4228 | } |
| 4229 | |
| 4230 | SDValue ARMTargetLowering::joinRegisterPartsIntoValue( |
| 4231 | SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, |
| 4232 | MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const { |
| 4233 | bool IsABIRegCopy = CC.hasValue(); |
| 4234 | if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) && |
| 4235 | PartVT == MVT::f32) { |
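| | // Inverse of the packing in splitValueIntoRegisterParts above: truncate |
| | // the f32 register's bits back down to the low 16 and bitcast. |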
| 4236 | unsigned ValueBits = ValueVT.getSizeInBits(); |
| 4237 | unsigned PartBits = PartVT.getSizeInBits(); |
| 4238 | SDValue Val = Parts[0]; |
| 4239 | |
| 4240 | Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(PartBits), Val); |
| 4241 | Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::getIntegerVT(ValueBits), Val); |
| 4242 | Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val); |
| 4243 | return Val; |
| 4244 | } |
| 4245 | return SDValue(); |
| 4246 | } |
| 4247 | |
| 4248 | SDValue ARMTargetLowering::LowerFormalArguments( |
| 4249 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
| 4250 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, |
| 4251 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
| 4252 | MachineFunction &MF = DAG.getMachineFunction(); |
| 4253 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 4254 | |
| 4255 | ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); |
| 4256 | |
| 4257 | // Assign locations to all of the incoming arguments. |
| 4258 | SmallVector<CCValAssign, 16> ArgLocs; |
| 4259 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
| 4260 | *DAG.getContext()); |
| 4261 | CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg)); |
| 4262 | |
| 4263 | SmallVector<SDValue, 16> ArgValues; |
| 4264 | SDValue ArgValue; |
| 4265 | Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin(); |
| 4266 | unsigned CurArgIdx = 0; |
| 4267 | |
| 4268 | // Initially ArgRegsSaveSize is zero. |
| 4269 | // Then we increase this value each time we meet a byval parameter. |
| 4270 | // We also increase this value in case of a varargs function. |
| 4271 | AFI->setArgRegsSaveSize(0); |
| 4272 | |
| 4273 | // Calculate the amount of stack space that we need to allocate to store |
| 4274 | // byval and variadic arguments that are passed in registers. |
| 4275 | // We need to know this before we allocate the first byval or variadic |
| 4276 | // argument, as they will be allocated a stack slot below the CFA (Canonical |
| 4277 | // Frame Address, the stack pointer at entry to the function). |
| 4278 | unsigned ArgRegBegin = ARM::R4; |
| 4279 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
| 4280 | if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount()) |
| 4281 | break; |
| 4282 | |
| 4283 | CCValAssign &VA = ArgLocs[i]; |
| 4284 | unsigned Index = VA.getValNo(); |
| 4285 | ISD::ArgFlagsTy Flags = Ins[Index].Flags; |
| 4286 | if (!Flags.isByVal()) |
| 4287 | continue; |
| 4288 | |
| 4289 | assert(VA.isMemLoc() && "unexpected byval pointer in reg"); |
| 4290 | unsigned RBegin, REnd; |
| 4291 | CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd); |
| 4292 | ArgRegBegin = std::min(ArgRegBegin, RBegin); |
| 4293 | |
| 4294 | CCInfo.nextInRegsParam(); |
| 4295 | } |
| 4296 | CCInfo.rewindByValRegsInfo(); |
| 4297 | |
| 4298 | int lastInsIndex = -1; |
| 4299 | if (isVarArg && MFI.hasVAStart()) { |
| 4300 | unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs); |
| 4301 | if (RegIdx != array_lengthof(GPRArgRegs)) |
| 4302 | ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]); |
| 4303 | } |
| 4304 | |
| 4305 | unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin); |
| 4306 | AFI->setArgRegsSaveSize(TotalArgRegsSaveSize); |
| 4307 | auto PtrVT = getPointerTy(DAG.getDataLayout()); |
| 4308 | |
| 4309 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
| 4310 | CCValAssign &VA = ArgLocs[i]; |
| 4311 | if (Ins[VA.getValNo()].isOrigArg()) { |
| 4312 | std::advance(CurOrigArg, |
| 4313 | Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx); |
| 4314 | CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex(); |
| 4315 | } |
| 4316 | // Arguments stored in registers. |
| 4317 | if (VA.isRegLoc()) { |
| 4318 | EVT RegVT = VA.getLocVT(); |
| 4319 | |
| 4320 | if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) { |
| 4321 | // f64 and vector types are split up into multiple registers or |
| 4322 | // combinations of registers and stack slots. |
| 4323 | SDValue ArgValue1 = |
| 4324 | GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); |
| 4325 | VA = ArgLocs[++i]; // skip ahead to next loc |
| 4326 | SDValue ArgValue2; |
| 4327 | if (VA.isMemLoc()) { |
| 4328 | int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true); |
| 4329 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
| 4330 | ArgValue2 = DAG.getLoad( |
| 4331 | MVT::f64, dl, Chain, FIN, |
| 4332 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); |
| 4333 | } else { |
| 4334 | ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); |
| 4335 | } |
| 4336 | ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); |
| 4337 | ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue, |
| 4338 | ArgValue1, DAG.getIntPtrConstant(0, dl)); |
| 4339 | ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue, |
| 4340 | ArgValue2, DAG.getIntPtrConstant(1, dl)); |
| 4341 | } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) { |
| 4342 | ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); |
| 4343 | } else { |
| 4344 | const TargetRegisterClass *RC; |
| 4345 | |
| 4346 | if (RegVT == MVT::f16 || RegVT == MVT::bf16) |
| 4347 | RC = &ARM::HPRRegClass; |
| 4348 | else if (RegVT == MVT::f32) |
| 4349 | RC = &ARM::SPRRegClass; |
| 4350 | else if (RegVT == MVT::f64 || RegVT == MVT::v4f16 || |
| 4351 | RegVT == MVT::v4bf16) |
| 4352 | RC = &ARM::DPRRegClass; |
| 4353 | else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16 || |
| 4354 | RegVT == MVT::v8bf16) |
| 4355 | RC = &ARM::QPRRegClass; |
| 4356 | else if (RegVT == MVT::i32) |
| 4357 | RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass |
| 4358 | : &ARM::GPRRegClass; |
| 4359 | else |
| 4360 | llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering" ); |
| 4361 | |
| 4362 | // Transform the arguments in physical registers into virtual ones. |
| 4363 | unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); |
| 4364 | ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); |
| 4365 | |
| 4366 | // If this value is passed in r0 and has the returned attribute (e.g. |
| 4367 | // C++ 'structors), record this fact for later use. |
| 4368 | if (VA.getLocReg() == ARM::R0 && Ins[VA.getValNo()].Flags.isReturned()) { |
| 4369 | AFI->setPreservesR0(); |
| 4370 | } |
| 4371 | } |
| 4372 | |
| 4373 | // If this is an 8 or 16-bit value, it is really passed promoted |
| 4374 | // to 32 bits. Insert an assert[sz]ext to capture this, then |
| 4375 | // truncate to the right size. |
| 4376 | switch (VA.getLocInfo()) { |
| 4377 | default: llvm_unreachable("Unknown loc info!" ); |
| 4378 | case CCValAssign::Full: break; |
| 4379 | case CCValAssign::BCvt: |
| 4380 | ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); |
| 4381 | break; |
| 4382 | case CCValAssign::SExt: |
| 4383 | ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, |
| 4384 | DAG.getValueType(VA.getValVT())); |
| 4385 | ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); |
| 4386 | break; |
| 4387 | case CCValAssign::ZExt: |
| 4388 | ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, |
| 4389 | DAG.getValueType(VA.getValVT())); |
| 4390 | ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); |
| 4391 | break; |
| 4392 | } |
| 4393 | |
| 4394 | // f16 arguments have their size extended to 4 bytes and are passed as if |
| 4395 | // they had been copied to the LSBs of a 32-bit register. |
| 4396 | // For that, they're passed extended to i32 (soft ABI) or to f32 (hard ABI). |
| 4397 | if (VA.needsCustom() && |
| 4398 | (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) |
| 4399 | ArgValue = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), ArgValue); |
| 4400 | |
| 4401 | InVals.push_back(ArgValue); |
| 4402 | } else { // !VA.isRegLoc() |
| 4403 | // sanity check |
| 4404 | assert(VA.isMemLoc()); |
| 4405 | assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); |
| 4406 | |
| 4407 | int index = VA.getValNo(); |
| 4408 | |
| 4409 | // Some Ins[] entries become multiple ArgLoc[] entries. |
| 4410 | // Process them only once. |
| 4411 | if (index != lastInsIndex) { |
| 4413 | ISD::ArgFlagsTy Flags = Ins[index].Flags; |
| 4414 | // FIXME: For now, all byval parameter objects are marked mutable. |
| 4415 | // This can be changed with more analysis. |
| 4416 | // In case of tail call optimization mark all arguments mutable. |
| 4417 | // Since they could be overwritten by lowering of arguments in case of |
| 4418 | // a tail call. |
| 4419 | if (Flags.isByVal()) { |
| 4420 | assert(Ins[index].isOrigArg() && |
| 4421 | "Byval arguments cannot be implicit" ); |
| 4422 | unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed(); |
| 4423 | |
| 4424 | int FrameIndex = StoreByValRegs( |
| 4425 | CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex, |
| 4426 | VA.getLocMemOffset(), Flags.getByValSize()); |
| 4427 | InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT)); |
| 4428 | CCInfo.nextInRegsParam(); |
| 4429 | } else { |
| 4430 | unsigned FIOffset = VA.getLocMemOffset(); |
| 4431 | int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8, |
| 4432 | FIOffset, true); |
| 4433 | |
| 4434 | // Create load nodes to retrieve arguments from the stack. |
| 4435 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); |
| 4436 | InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, |
| 4437 | MachinePointerInfo::getFixedStack( |
| 4438 | DAG.getMachineFunction(), FI))); |
| 4439 | } |
| 4440 | lastInsIndex = index; |
| 4441 | } |
| 4442 | } |
| 4443 | } |
| 4444 | |
| 4445 | // varargs |
| 4446 | if (isVarArg && MFI.hasVAStart()) { |
| 4447 | VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset(), |
| 4448 | TotalArgRegsSaveSize); |
| 4449 | if (AFI->isCmseNSEntryFunction()) { |
| 4450 | DiagnosticInfoUnsupported Diag( |
| 4451 | DAG.getMachineFunction().getFunction(), |
| 4452 | "secure entry function must not be variadic" , dl.getDebugLoc()); |
| 4453 | DAG.getContext()->diagnose(Diag); |
| 4454 | } |
| 4455 | } |
| 4456 | |
| 4457 | AFI->setArgumentStackSize(CCInfo.getNextStackOffset()); |
| 4458 | |
| 4459 | if (CCInfo.getNextStackOffset() > 0 && AFI->isCmseNSEntryFunction()) { |
| 4460 | DiagnosticInfoUnsupported Diag( |
| 4461 | DAG.getMachineFunction().getFunction(), |
| 4462 | "secure entry function requires arguments on stack" , dl.getDebugLoc()); |
| 4463 | DAG.getContext()->diagnose(Diag); |
| 4464 | } |
| 4465 | |
| 4466 | return Chain; |
| 4467 | } |
| 4468 | |
| 4469 | /// isFloatingPointZero - Return true if this is +0.0. |
| 4470 | static bool isFloatingPointZero(SDValue Op) { |
| 4471 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) |
| 4472 | return CFP->getValueAPF().isPosZero(); |
| 4473 | else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { |
| 4474 | // Maybe this has already been legalized into the constant pool? |
| 4475 | if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { |
| 4476 | SDValue WrapperOp = Op.getOperand(1).getOperand(0); |
| 4477 | if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) |
| 4478 | if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) |
| 4479 | return CFP->getValueAPF().isPosZero(); |
| 4480 | } |
| 4481 | } else if (Op->getOpcode() == ISD::BITCAST && |
| 4482 | Op->getValueType(0) == MVT::f64) { |
| 4483 | // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64) |
| 4484 | // created by LowerConstantFP(). |
| 4485 | SDValue BitcastOp = Op->getOperand(0); |
| 4486 | if (BitcastOp->getOpcode() == ARMISD::VMOVIMM && |
| 4487 | isNullConstant(BitcastOp->getOperand(0))) |
| 4488 | return true; |
| 4489 | } |
| 4490 | return false; |
| 4491 | } |
| 4492 | |
| 4493 | /// Returns an appropriate ARM CMP (cmp) and the corresponding condition code |
| 4494 | /// for the given operands. |
| 4495 | SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, |
| 4496 | SDValue &ARMcc, SelectionDAG &DAG, |
| 4497 | const SDLoc &dl) const { |
| 4498 | if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { |
| 4499 | unsigned C = RHSC->getZExtValue(); |
| 4500 | if (!isLegalICmpImmediate((int32_t)C)) { |
| 4501 | // Constant does not fit, try adjusting it by one. |
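| | // For example, 0x101 is not a legal ARM-mode compare immediate, but |
| | // 0x100 is, so "x < 0x101" is rewritten as "x <= 0x100" |
| | // (SETLT -> SETLE with C - 1). |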
| 4502 | switch (CC) { |
| 4503 | default: break; |
| 4504 | case ISD::SETLT: |
| 4505 | case ISD::SETGE: |
| 4506 | if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { |
| 4507 | CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; |
| 4508 | RHS = DAG.getConstant(C - 1, dl, MVT::i32); |
| 4509 | } |
| 4510 | break; |
| 4511 | case ISD::SETULT: |
| 4512 | case ISD::SETUGE: |
| 4513 | if (C != 0 && isLegalICmpImmediate(C-1)) { |
| 4514 | CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; |
| 4515 | RHS = DAG.getConstant(C - 1, dl, MVT::i32); |
| 4516 | } |
| 4517 | break; |
| 4518 | case ISD::SETLE: |
| 4519 | case ISD::SETGT: |
| 4520 | if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { |
| 4521 | CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; |
| 4522 | RHS = DAG.getConstant(C + 1, dl, MVT::i32); |
| 4523 | } |
| 4524 | break; |
| 4525 | case ISD::SETULE: |
| 4526 | case ISD::SETUGT: |
| 4527 | if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { |
| 4528 | CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; |
| 4529 | RHS = DAG.getConstant(C + 1, dl, MVT::i32); |
| 4530 | } |
| 4531 | break; |
| 4532 | } |
| 4533 | } |
| 4534 | } else if ((ARM_AM::getShiftOpcForNode(LHS.getOpcode()) != ARM_AM::no_shift) && |
| 4535 | (ARM_AM::getShiftOpcForNode(RHS.getOpcode()) == ARM_AM::no_shift)) { |
| 4536 | // In ARM and Thumb-2, the compare instructions can shift their second |
| 4537 | // operand. |
| 4538 | CC = ISD::getSetCCSwappedOperands(CC); |
| 4539 | std::swap(LHS, RHS); |
| 4540 | } |
| 4541 | |
| 4542 | // Thumb1 has very limited immediate modes, so turning an "and" into a |
| 4543 | // shift can save multiple instructions. |
| 4544 | // |
| 4545 | // If we have (x & C1), and C1 is an appropriate mask, we can transform it |
| 4546 | // into "((x << n) >> n)". But that isn't necessarily profitable on its |
| 4547 | // own. If it's the operand to an unsigned comparison with an immediate, |
| 4548 | // we can eliminate one of the shifts: we transform |
| 4549 | // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)". |
| 4550 | // |
| 4551 | // We avoid transforming cases which aren't profitable due to encoding |
| 4552 | // details: |
| 4553 | // |
| 4554 | // 1. C2 fits into the immediate field of a cmp, and the transformed version |
| 4555 | // would not; in that case, we're essentially trading one immediate load for |
| 4556 | // another. |
| 4557 | // 2. C1 is 255 or 65535, so we can use uxtb or uxth. |
| 4558 | // 3. C2 is zero; we have other code for this special case. |
| 4559 | // |
| 4560 | // FIXME: Figure out profitability for Thumb2; we usually can't save an |
| 4561 | // instruction, since the AND is always one instruction anyway, but we could |
| 4562 | // use narrow instructions in some cases. |
| 4563 | if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::AND && |
| 4564 | LHS->hasOneUse() && isa<ConstantSDNode>(LHS.getOperand(1)) && |
| 4565 | LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(RHS) && |
| 4566 | !isSignedIntSetCC(CC)) { |
| 4567 | unsigned Mask = cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue(); |
| 4568 | auto *RHSC = cast<ConstantSDNode>(RHS.getNode()); |
| 4569 | uint64_t RHSV = RHSC->getZExtValue(); |
| 4570 | if (isMask_32(Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) { |
| 4571 | unsigned ShiftBits = countLeadingZeros(Mask); |
| 4572 | if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) { |
| 4573 | SDValue ShiftAmt = DAG.getConstant(ShiftBits, dl, MVT::i32); |
| 4574 | LHS = DAG.getNode(ISD::SHL, dl, MVT::i32, LHS.getOperand(0), ShiftAmt); |
| 4575 | RHS = DAG.getConstant(RHSV << ShiftBits, dl, MVT::i32); |
| 4576 | } |
| 4577 | } |
| 4578 | } |
| 4579 | |
| 4580 | // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a |
| 4581 | // single "lsls x, c+1". The shift sets the "C" and "Z" flags the same |
| 4582 | // way a cmp would. |
| 4583 | // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and |
| 4584 | // some tweaks to the heuristics for the previous and->shift transform. |
| 4585 | // FIXME: Optimize cases where the LHS isn't a shift. |
| 4586 | if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL && |
| 4587 | isa<ConstantSDNode>(RHS) && |
| 4588 | cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U && |
| 4589 | CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) && |
| 4590 | cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() < 31) { |
| 4591 | unsigned ShiftAmt = |
| 4592 | cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() + 1; |
| 4593 | SDValue Shift = DAG.getNode(ARMISD::LSLS, dl, |
| 4594 | DAG.getVTList(MVT::i32, MVT::i32), |
| 4595 | LHS.getOperand(0), |
| 4596 | DAG.getConstant(ShiftAmt, dl, MVT::i32)); |
| 4597 | SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR, |
| 4598 | Shift.getValue(1), SDValue()); |
| 4599 | ARMcc = DAG.getConstant(ARMCC::HI, dl, MVT::i32); |
| 4600 | return Chain.getValue(1); |
| 4601 | } |
| 4602 | |
| 4603 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 4604 | |
| 4605 | // If the RHS is a constant zero then the V (overflow) flag will never be |
| 4606 | // set. This can allow us to simplify GE to PL or LT to MI, which can be |
| 4607 | // simpler for other passes (like the peephole optimiser) to deal with. |
| 4608 | if (isNullConstant(RHS)) { |
| 4609 | switch (CondCode) { |
| 4610 | default: break; |
| 4611 | case ARMCC::GE: |
| 4612 | CondCode = ARMCC::PL; |
| 4613 | break; |
| 4614 | case ARMCC::LT: |
| 4615 | CondCode = ARMCC::MI; |
| 4616 | break; |
| 4617 | } |
| 4618 | } |
| 4619 | |
| 4620 | ARMISD::NodeType CompareType; |
| 4621 | switch (CondCode) { |
| 4622 | default: |
| 4623 | CompareType = ARMISD::CMP; |
| 4624 | break; |
| 4625 | case ARMCC::EQ: |
| 4626 | case ARMCC::NE: |
| 4627 | // Uses only Z Flag |
| 4628 | CompareType = ARMISD::CMPZ; |
| 4629 | break; |
| 4630 | } |
| 4631 | ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); |
| 4632 | return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); |
| 4633 | } |
| 4634 | |
| 4635 | /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. |
| 4636 | SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, |
| 4637 | SelectionDAG &DAG, const SDLoc &dl, |
| 4638 | bool Signaling) const { |
| 4639 | assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64); |
| 4640 | SDValue Cmp; |
| 4641 | if (!isFloatingPointZero(RHS)) |
| 4642 | Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPE : ARMISD::CMPFP, |
| 4643 | dl, MVT::Glue, LHS, RHS); |
| 4644 | else |
| 4645 | Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPEw0 : ARMISD::CMPFPw0, |
| 4646 | dl, MVT::Glue, LHS); |
| 4647 | return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); |
| 4648 | } |
| 4649 | |
| 4650 | /// duplicateCmp - Glue values can have only one use, so this function |
| 4651 | /// duplicates a comparison node. |
| 4652 | SDValue |
| 4653 | ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { |
| 4654 | unsigned Opc = Cmp.getOpcode(); |
| 4655 | SDLoc DL(Cmp); |
| 4656 | if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) |
| 4657 | return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); |
| 4658 | |
| 4659 | assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); |
| 4660 | Cmp = Cmp.getOperand(0); |
| 4661 | Opc = Cmp.getOpcode(); |
| 4662 | if (Opc == ARMISD::CMPFP) |
| 4663 | Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); |
| 4664 | else { |
| 4665 | assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); |
| 4666 | Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); |
| 4667 | } |
| 4668 | return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); |
| 4669 | } |
| 4670 | |
| 4671 | // This function returns three things: the arithmetic computation itself |
| 4672 | // (Value), a comparison (OverflowCmp), and a condition code (ARMcc). The |
| 4673 | // comparison and the condition code define the case in which the arithmetic |
| 4674 | // computation *does not* overflow. |
| 4675 | std::pair<SDValue, SDValue> |
| 4676 | ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG, |
| 4677 | SDValue &ARMcc) const { |
| 4678 | assert(Op.getValueType() == MVT::i32 && "Unsupported value type"); |
| 4679 | |
| 4680 | SDValue Value, OverflowCmp; |
| 4681 | SDValue LHS = Op.getOperand(0); |
| 4682 | SDValue RHS = Op.getOperand(1); |
| 4683 | SDLoc dl(Op); |
| 4684 | |
| 4685 | // FIXME: We are currently always generating CMPs because we don't support |
| 4686 | // generating CMN through the backend. This is not as good as the natural |
| 4687 | // CMP case because it causes a register dependency and cannot be folded |
| 4688 | // later. |
| 4689 | |
| 4690 | switch (Op.getOpcode()) { |
| 4691 | default: |
| 4692 | llvm_unreachable("Unknown overflow instruction!" ); |
| 4693 | case ISD::SADDO: |
| 4694 | ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); |
| 4695 | Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS); |
| 4696 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); |
| 4697 | break; |
| 4698 | case ISD::UADDO: |
| 4699 | ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); |
| 4700 | // We use ADDC here to correspond to its use in LowerUnsignedALUO. |
| 4701 | // We do not use it in the USUBO case as Value may not be used. |
| 4702 | Value = DAG.getNode(ARMISD::ADDC, dl, |
| 4703 | DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS) |
| 4704 | .getValue(0); |
| 4705 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); |
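    // Unsigned addition wraps iff the result is (unsigned) smaller than
    // either operand, so HS (Value uge LHS) holds exactly when no overflow
    // occurred.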
| 4706 | break; |
| 4707 | case ISD::SSUBO: |
| 4708 | ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); |
| 4709 | Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); |
| 4710 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); |
| 4711 | break; |
| 4712 | case ISD::USUBO: |
| 4713 | ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); |
| 4714 | Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); |
| 4715 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); |
| 4716 | break; |
| 4717 | case ISD::UMULO: |
| 4718 | // We generate a UMUL_LOHI and then check if the high word is 0. |
| 4719 | ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32); |
| 4720 | Value = DAG.getNode(ISD::UMUL_LOHI, dl, |
| 4721 | DAG.getVTList(Op.getValueType(), Op.getValueType()), |
| 4722 | LHS, RHS); |
| 4723 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1), |
| 4724 | DAG.getConstant(0, dl, MVT::i32)); |
| 4725 | Value = Value.getValue(0); // We only want the low 32 bits for the result. |
| 4726 | break; |
| 4727 | case ISD::SMULO: |
| 4728 | // We generate a SMUL_LOHI and then check if all the bits of the high word |
| 4729 | // are the same as the sign bit of the low word. |
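    // Worked example: 0x40000000 * 2 = 0x0000000080000000; the high word is 0
    // but the sign-extension of the low word (SRA by 31) is all-ones, so the
    // EQ check fails and the signed overflow is reported.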
| 4730 | ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32); |
| 4731 | Value = DAG.getNode(ISD::SMUL_LOHI, dl, |
| 4732 | DAG.getVTList(Op.getValueType(), Op.getValueType()), |
| 4733 | LHS, RHS); |
| 4734 | OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1), |
| 4735 | DAG.getNode(ISD::SRA, dl, Op.getValueType(), |
| 4736 | Value.getValue(0), |
| 4737 | DAG.getConstant(31, dl, MVT::i32))); |
| 4738 | Value = Value.getValue(0); // We only want the low 32 bits for the result. |
| 4739 | break; |
| 4740 | } // switch (...) |
| 4741 | |
| 4742 | return std::make_pair(Value, OverflowCmp); |
| 4743 | } |
| 4744 | |
| 4745 | SDValue |
| 4746 | ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const { |
| 4747 | // Let legalize expand this if it isn't a legal type yet. |
| 4748 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) |
| 4749 | return SDValue(); |
| 4750 | |
| 4751 | SDValue Value, OverflowCmp; |
| 4752 | SDValue ARMcc; |
| 4753 | std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc); |
| 4754 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 4755 | SDLoc dl(Op); |
| 4756 | // We use 0 and 1 as false and true values. |
| 4757 | SDValue TVal = DAG.getConstant(1, dl, MVT::i32); |
| 4758 | SDValue FVal = DAG.getConstant(0, dl, MVT::i32); |
| 4759 | EVT VT = Op.getValueType(); |
| 4760 | |
| 4761 | SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal, |
| 4762 | ARMcc, CCR, OverflowCmp); |
| 4763 | |
| 4764 | SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); |
| 4765 | return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); |
| 4766 | } |
| 4767 | |
| 4768 | static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, |
| 4769 | SelectionDAG &DAG) { |
| 4770 | SDLoc DL(BoolCarry); |
| 4771 | EVT CarryVT = BoolCarry.getValueType(); |
| 4772 | |
| 4773 | // This converts the boolean value carry into the carry flag by doing |
| 4774 | // ARMISD::SUBC Carry, 1 |
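  // (ARM treats carry as "no borrow" on subtraction: 1 - 1 leaves the carry
  // set, while 0 - 1 borrows and clears it, so the flag ends up equal to the
  // boolean input.)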
| 4775 | SDValue Carry = DAG.getNode(ARMISD::SUBC, DL, |
| 4776 | DAG.getVTList(CarryVT, MVT::i32), |
| 4777 | BoolCarry, DAG.getConstant(1, DL, CarryVT)); |
| 4778 | return Carry.getValue(1); |
| 4779 | } |
| 4780 | |
| 4781 | static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, |
| 4782 | SelectionDAG &DAG) { |
| 4783 | SDLoc DL(Flags); |
| 4784 | |
| 4785 | // Now convert the carry flag into a boolean carry. We do this |
  // using ARMISD::ADDE 0, 0, Carry
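  // (0 + 0 + C computes exactly C, materialising the flag as a 0/1 integer.)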
| 4787 | return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32), |
| 4788 | DAG.getConstant(0, DL, MVT::i32), |
| 4789 | DAG.getConstant(0, DL, MVT::i32), Flags); |
| 4790 | } |
| 4791 | |
| 4792 | SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op, |
| 4793 | SelectionDAG &DAG) const { |
| 4794 | // Let legalize expand this if it isn't a legal type yet. |
| 4795 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) |
| 4796 | return SDValue(); |
| 4797 | |
| 4798 | SDValue LHS = Op.getOperand(0); |
| 4799 | SDValue RHS = Op.getOperand(1); |
| 4800 | SDLoc dl(Op); |
| 4801 | |
| 4802 | EVT VT = Op.getValueType(); |
| 4803 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
| 4804 | SDValue Value; |
| 4805 | SDValue Overflow; |
| 4806 | switch (Op.getOpcode()) { |
| 4807 | default: |
| 4808 | llvm_unreachable("Unknown overflow instruction!" ); |
| 4809 | case ISD::UADDO: |
| 4810 | Value = DAG.getNode(ARMISD::ADDC, dl, VTs, LHS, RHS); |
| 4811 | // Convert the carry flag into a boolean value. |
| 4812 | Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG); |
| 4813 | break; |
| 4814 | case ISD::USUBO: { |
| 4815 | Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS); |
| 4816 | // Convert the carry flag into a boolean value. |
| 4817 | Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG); |
    // ARMISD::SUBC returns 0 when we have to borrow, so turn that into an
    // overflow value by computing 1 - C.
| 4820 | Overflow = DAG.getNode(ISD::SUB, dl, MVT::i32, |
| 4821 | DAG.getConstant(1, dl, MVT::i32), Overflow); |
| 4822 | break; |
| 4823 | } |
| 4824 | } |
| 4825 | |
| 4826 | return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); |
| 4827 | } |
| 4828 | |
| 4829 | static SDValue LowerSADDSUBSAT(SDValue Op, SelectionDAG &DAG, |
| 4830 | const ARMSubtarget *Subtarget) { |
| 4831 | EVT VT = Op.getValueType(); |
| 4832 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
| 4833 | return SDValue(); |
| 4834 | if (!VT.isSimple()) |
| 4835 | return SDValue(); |
| 4836 | |
| 4837 | unsigned NewOpcode; |
| 4838 | bool IsAdd = Op->getOpcode() == ISD::SADDSAT; |
| 4839 | switch (VT.getSimpleVT().SimpleTy) { |
| 4840 | default: |
| 4841 | return SDValue(); |
| 4842 | case MVT::i8: |
| 4843 | NewOpcode = IsAdd ? ARMISD::QADD8b : ARMISD::QSUB8b; |
| 4844 | break; |
| 4845 | case MVT::i16: |
| 4846 | NewOpcode = IsAdd ? ARMISD::QADD16b : ARMISD::QSUB16b; |
| 4847 | break; |
| 4848 | } |
| 4849 | |
| 4850 | SDLoc dl(Op); |
| 4851 | SDValue Add = |
| 4852 | DAG.getNode(NewOpcode, dl, MVT::i32, |
| 4853 | DAG.getSExtOrTrunc(Op->getOperand(0), dl, MVT::i32), |
| 4854 | DAG.getSExtOrTrunc(Op->getOperand(1), dl, MVT::i32)); |
| 4855 | return DAG.getNode(ISD::TRUNCATE, dl, VT, Add); |
| 4856 | } |
| 4857 | |
| 4858 | SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { |
| 4859 | SDValue Cond = Op.getOperand(0); |
| 4860 | SDValue SelectTrue = Op.getOperand(1); |
| 4861 | SDValue SelectFalse = Op.getOperand(2); |
| 4862 | SDLoc dl(Op); |
| 4863 | unsigned Opc = Cond.getOpcode(); |
| 4864 | |
| 4865 | if (Cond.getResNo() == 1 && |
| 4866 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 4867 | Opc == ISD::USUBO)) { |
| 4868 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) |
| 4869 | return SDValue(); |
| 4870 | |
| 4871 | SDValue Value, OverflowCmp; |
| 4872 | SDValue ARMcc; |
| 4873 | std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); |
| 4874 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 4875 | EVT VT = Op.getValueType(); |
| 4876 | |
| 4877 | return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR, |
| 4878 | OverflowCmp, DAG); |
| 4879 | } |
| 4880 | |
| 4881 | // Convert: |
| 4882 | // |
| 4883 | // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) |
| 4884 | // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) |
| 4885 | // |
| 4886 | if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { |
| 4887 | const ConstantSDNode *CMOVTrue = |
| 4888 | dyn_cast<ConstantSDNode>(Cond.getOperand(0)); |
| 4889 | const ConstantSDNode *CMOVFalse = |
| 4890 | dyn_cast<ConstantSDNode>(Cond.getOperand(1)); |
| 4891 | |
| 4892 | if (CMOVTrue && CMOVFalse) { |
| 4893 | unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); |
| 4894 | unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); |
| 4895 | |
| 4896 | SDValue True; |
| 4897 | SDValue False; |
| 4898 | if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { |
| 4899 | True = SelectTrue; |
| 4900 | False = SelectFalse; |
| 4901 | } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { |
| 4902 | True = SelectFalse; |
| 4903 | False = SelectTrue; |
| 4904 | } |
| 4905 | |
| 4906 | if (True.getNode() && False.getNode()) { |
| 4907 | EVT VT = Op.getValueType(); |
| 4908 | SDValue ARMcc = Cond.getOperand(2); |
| 4909 | SDValue CCR = Cond.getOperand(3); |
| 4910 | SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); |
| 4911 | assert(True.getValueType() == VT); |
| 4912 | return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG); |
| 4913 | } |
| 4914 | } |
| 4915 | } |
| 4916 | |
| 4917 | // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the |
| 4918 | // undefined bits before doing a full-word comparison with zero. |
| 4919 | Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond, |
| 4920 | DAG.getConstant(1, dl, Cond.getValueType())); |
| 4921 | |
| 4922 | return DAG.getSelectCC(dl, Cond, |
| 4923 | DAG.getConstant(0, dl, Cond.getValueType()), |
| 4924 | SelectTrue, SelectFalse, ISD::SETNE); |
| 4925 | } |
| 4926 | |
| 4927 | static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, |
| 4928 | bool &swpCmpOps, bool &swpVselOps) { |
| 4929 | // Start by selecting the GE condition code for opcodes that return true for |
| 4930 | // 'equality' |
| 4931 | if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || |
| 4932 | CC == ISD::SETULE || CC == ISD::SETGE || CC == ISD::SETLE) |
| 4933 | CondCode = ARMCC::GE; |
| 4934 | |
| 4935 | // and GT for opcodes that return false for 'equality'. |
| 4936 | else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || |
| 4937 | CC == ISD::SETULT || CC == ISD::SETGT || CC == ISD::SETLT) |
| 4938 | CondCode = ARMCC::GT; |
| 4939 | |
| 4940 | // Since we are constrained to GE/GT, if the opcode contains 'less', we need |
| 4941 | // to swap the compare operands. |
| 4942 | if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || |
| 4943 | CC == ISD::SETULT || CC == ISD::SETLE || CC == ISD::SETLT) |
| 4944 | swpCmpOps = true; |
| 4945 | |
| 4946 | // Both GT and GE are ordered comparisons, and return false for 'unordered'. |
| 4947 | // If we have an unordered opcode, we need to swap the operands to the VSEL |
| 4948 | // instruction (effectively negating the condition). |
| 4949 | // |
| 4950 | // This also has the effect of swapping which one of 'less' or 'greater' |
| 4951 | // returns true, so we also swap the compare operands. It also switches |
| 4952 | // whether we return true for 'equality', so we compensate by picking the |
| 4953 | // opposite condition code to our original choice. |
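  // Worked example: for SETULT we start with GT plus swapped compare
  // operands; the unordered adjustment below then un-swaps the compare,
  // swaps the VSEL operands and relaxes GT to GE, so (x SETULT y ? t : f)
  // becomes "compare x, y; GE ? f : t", which fires for unordered or x < y.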
| 4954 | if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || |
| 4955 | CC == ISD::SETUGT) { |
| 4956 | swpCmpOps = !swpCmpOps; |
| 4957 | swpVselOps = !swpVselOps; |
| 4958 | CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; |
| 4959 | } |
| 4960 | |
| 4961 | // 'ordered' is 'anything but unordered', so use the VS condition code and |
| 4962 | // swap the VSEL operands. |
| 4963 | if (CC == ISD::SETO) { |
| 4964 | CondCode = ARMCC::VS; |
| 4965 | swpVselOps = true; |
| 4966 | } |
| 4967 | |
| 4968 | // 'unordered or not equal' is 'anything but equal', so use the EQ condition |
| 4969 | // code and swap the VSEL operands. Also do this if we don't care about the |
| 4970 | // unordered case. |
| 4971 | if (CC == ISD::SETUNE || CC == ISD::SETNE) { |
| 4972 | CondCode = ARMCC::EQ; |
| 4973 | swpVselOps = true; |
| 4974 | } |
| 4975 | } |
| 4976 | |
| 4977 | SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, |
| 4978 | SDValue TrueVal, SDValue ARMcc, SDValue CCR, |
| 4979 | SDValue Cmp, SelectionDAG &DAG) const { |
| 4980 | if (!Subtarget->hasFP64() && VT == MVT::f64) { |
| 4981 | FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 4982 | DAG.getVTList(MVT::i32, MVT::i32), FalseVal); |
| 4983 | TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 4984 | DAG.getVTList(MVT::i32, MVT::i32), TrueVal); |
| 4985 | |
| 4986 | SDValue TrueLow = TrueVal.getValue(0); |
| 4987 | SDValue TrueHigh = TrueVal.getValue(1); |
| 4988 | SDValue FalseLow = FalseVal.getValue(0); |
| 4989 | SDValue FalseHigh = FalseVal.getValue(1); |
| 4990 | |
| 4991 | SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow, |
| 4992 | ARMcc, CCR, Cmp); |
| 4993 | SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh, |
| 4994 | ARMcc, CCR, duplicateCmp(Cmp, DAG)); |
| 4995 | |
| 4996 | return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High); |
| 4997 | } else { |
| 4998 | return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR, |
| 4999 | Cmp); |
| 5000 | } |
| 5001 | } |
| 5002 | |
| 5003 | static bool isGTorGE(ISD::CondCode CC) { |
| 5004 | return CC == ISD::SETGT || CC == ISD::SETGE; |
| 5005 | } |
| 5006 | |
| 5007 | static bool isLTorLE(ISD::CondCode CC) { |
| 5008 | return CC == ISD::SETLT || CC == ISD::SETLE; |
| 5009 | } |
| 5010 | |
| 5011 | // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating. |
| 5012 | // All of these conditions (and their <= and >= counterparts) will do: |
| 5013 | // x < k ? k : x |
| 5014 | // x > k ? x : k |
| 5015 | // k < x ? x : k |
| 5016 | // k > x ? k : x |
| 5017 | static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, |
| 5018 | const SDValue TrueVal, const SDValue FalseVal, |
| 5019 | const ISD::CondCode CC, const SDValue K) { |
| 5020 | return (isGTorGE(CC) && |
| 5021 | ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) || |
| 5022 | (isLTorLE(CC) && |
| 5023 | ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))); |
| 5024 | } |
| 5025 | |
| 5026 | // Check if two chained conditionals could be converted into SSAT or USAT. |
| 5027 | // |
| 5028 | // SSAT can replace a set of two conditional selectors that bound a number to an |
| 5029 | // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples: |
| 5030 | // |
| 5031 | // x < -k ? -k : (x > k ? k : x) |
| 5032 | // x < -k ? -k : (x < k ? x : k) |
| 5033 | // x > -k ? (x > k ? k : x) : -k |
| 5034 | // x < k ? (x < -k ? -k : x) : k |
| 5035 | // etc. |
| 5036 | // |
| 5037 | // LLVM canonicalizes these to either a min(max()) or a max(min()) |
| 5038 | // pattern. This function tries to match one of these and will return a SSAT |
| 5039 | // node if successful. |
| 5040 | // |
// USAT works similarly to SSAT but bounds to the interval [0, k] where k + 1
// is a power of 2.
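// Worked example (an illustrative sketch): clamping x to [-128, 127] hits the
// Val1 == ~Val2 case (-128 == ~127) with k = 127 and k + 1 = 128 = 2^7, so
// the two selects collapse to a single SSAT node carrying
// countTrailingOnes(127) == 7 as its constant operand.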
| 5043 | static SDValue LowerSaturatingConditional(SDValue Op, SelectionDAG &DAG) { |
| 5044 | EVT VT = Op.getValueType(); |
| 5045 | SDValue V1 = Op.getOperand(0); |
| 5046 | SDValue K1 = Op.getOperand(1); |
| 5047 | SDValue TrueVal1 = Op.getOperand(2); |
| 5048 | SDValue FalseVal1 = Op.getOperand(3); |
| 5049 | ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get(); |
| 5050 | |
| 5051 | const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1; |
| 5052 | if (Op2.getOpcode() != ISD::SELECT_CC) |
| 5053 | return SDValue(); |
| 5054 | |
| 5055 | SDValue V2 = Op2.getOperand(0); |
| 5056 | SDValue K2 = Op2.getOperand(1); |
| 5057 | SDValue TrueVal2 = Op2.getOperand(2); |
| 5058 | SDValue FalseVal2 = Op2.getOperand(3); |
| 5059 | ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get(); |
| 5060 | |
| 5061 | SDValue V1Tmp = V1; |
| 5062 | SDValue V2Tmp = V2; |
| 5063 | |
| 5064 | // Check that the registers and the constants match a max(min()) or min(max()) |
| 5065 | // pattern |
| 5066 | if (V1Tmp != TrueVal1 || V2Tmp != TrueVal2 || K1 != FalseVal1 || |
| 5067 | K2 != FalseVal2 || |
| 5068 | !((isGTorGE(CC1) && isLTorLE(CC2)) || (isLTorLE(CC1) && isGTorGE(CC2)))) |
| 5069 | return SDValue(); |
| 5070 | |
| 5071 | // Check that the constant in the lower-bound check is |
| 5072 | // the opposite of the constant in the upper-bound check |
| 5073 | // in 1's complement. |
| 5074 | if (!isa<ConstantSDNode>(K1) || !isa<ConstantSDNode>(K2)) |
| 5075 | return SDValue(); |
| 5076 | |
| 5077 | int64_t Val1 = cast<ConstantSDNode>(K1)->getSExtValue(); |
| 5078 | int64_t Val2 = cast<ConstantSDNode>(K2)->getSExtValue(); |
| 5079 | int64_t PosVal = std::max(Val1, Val2); |
| 5080 | int64_t NegVal = std::min(Val1, Val2); |
| 5081 | |
| 5082 | if (!((Val1 > Val2 && isLTorLE(CC1)) || (Val1 < Val2 && isLTorLE(CC2))) || |
| 5083 | !isPowerOf2_64(PosVal + 1)) |
| 5084 | return SDValue(); |
| 5085 | |
| 5086 | // Handle the difference between USAT (unsigned) and SSAT (signed) |
| 5087 | // saturation |
| 5088 | // At this point, PosVal is guaranteed to be positive |
| 5089 | uint64_t K = PosVal; |
| 5090 | SDLoc dl(Op); |
| 5091 | if (Val1 == ~Val2) |
| 5092 | return DAG.getNode(ARMISD::SSAT, dl, VT, V2Tmp, |
| 5093 | DAG.getConstant(countTrailingOnes(K), dl, VT)); |
| 5094 | if (NegVal == 0) |
| 5095 | return DAG.getNode(ARMISD::USAT, dl, VT, V2Tmp, |
| 5096 | DAG.getConstant(countTrailingOnes(K), dl, VT)); |
| 5097 | |
| 5098 | return SDValue(); |
| 5099 | } |
| 5100 | |
| 5101 | // Check if a condition of the type x < k ? k : x can be converted into a |
| 5102 | // bit operation instead of conditional moves. |
| 5103 | // Currently this is allowed given: |
| 5104 | // - The conditions and values match up |
| 5105 | // - k is 0 or -1 (all ones) |
// This function will not check the last condition; that's up to the caller.
// It returns true if the transformation can be made, and in such case
// returns x in V, and k in SatK.
| 5109 | static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V, |
| 5110 | SDValue &SatK) |
| 5111 | { |
| 5112 | SDValue LHS = Op.getOperand(0); |
| 5113 | SDValue RHS = Op.getOperand(1); |
| 5114 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); |
| 5115 | SDValue TrueVal = Op.getOperand(2); |
| 5116 | SDValue FalseVal = Op.getOperand(3); |
| 5117 | |
| 5118 | SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS) |
| 5119 | ? &RHS |
| 5120 | : nullptr; |
| 5121 | |
  // No constant operand in the comparison, early out
| 5123 | if (!K) |
| 5124 | return false; |
| 5125 | |
| 5126 | SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal; |
| 5127 | V = (KTmp == TrueVal) ? FalseVal : TrueVal; |
| 5128 | SDValue VTmp = (K && *K == LHS) ? RHS : LHS; |
| 5129 | |
  // If the constant in the compare does not match the constant in the select,
  // or the variables do not match, early out
| 5132 | if (*K != KTmp || V != VTmp) |
| 5133 | return false; |
| 5134 | |
| 5135 | if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, *K)) { |
| 5136 | SatK = *K; |
| 5137 | return true; |
| 5138 | } |
| 5139 | |
| 5140 | return false; |
| 5141 | } |
| 5142 | |
| 5143 | bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const { |
| 5144 | if (VT == MVT::f32) |
| 5145 | return !Subtarget->hasVFP2Base(); |
| 5146 | if (VT == MVT::f64) |
| 5147 | return !Subtarget->hasFP64(); |
| 5148 | if (VT == MVT::f16) |
| 5149 | return !Subtarget->hasFullFP16(); |
| 5150 | return false; |
| 5151 | } |
| 5152 | |
| 5153 | SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { |
| 5154 | EVT VT = Op.getValueType(); |
| 5155 | SDLoc dl(Op); |
| 5156 | |
| 5157 | // Try to convert two saturating conditional selects into a single SSAT |
| 5158 | if ((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) |
| 5159 | if (SDValue SatValue = LowerSaturatingConditional(Op, DAG)) |
| 5160 | return SatValue; |
| 5161 | |
  // Try to convert expressions of the form x < k ? k : x (and similar forms)
  // into more efficient bit operations, which is possible when k is 0 or -1
  // (all ones). On ARM and Thumb-2, which have a flexible second operand,
  // this results in a single instruction; on Thumb-1 the shift and the bit
  // operation take two instructions.
  // Only allow this transformation on full-width (32-bit) operations.
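  // For illustration: with k = 0, max(x, 0) becomes x & ~(x >> 31) -- the
  // arithmetic shift smears the sign bit, so negative x is masked to 0 and
  // non-negative x passes through; with k = -1 the equivalent form is
  // x | (x >> 31).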
| 5168 | SDValue LowerSatConstant; |
| 5169 | SDValue SatValue; |
| 5170 | if (VT == MVT::i32 && |
| 5171 | isLowerSaturatingConditional(Op, SatValue, LowerSatConstant)) { |
| 5172 | SDValue ShiftV = DAG.getNode(ISD::SRA, dl, VT, SatValue, |
| 5173 | DAG.getConstant(31, dl, VT)); |
| 5174 | if (isNullConstant(LowerSatConstant)) { |
| 5175 | SDValue NotShiftV = DAG.getNode(ISD::XOR, dl, VT, ShiftV, |
| 5176 | DAG.getAllOnesConstant(dl, VT)); |
| 5177 | return DAG.getNode(ISD::AND, dl, VT, SatValue, NotShiftV); |
| 5178 | } else if (isAllOnesConstant(LowerSatConstant)) |
| 5179 | return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV); |
| 5180 | } |
| 5181 | |
| 5182 | SDValue LHS = Op.getOperand(0); |
| 5183 | SDValue RHS = Op.getOperand(1); |
| 5184 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); |
| 5185 | SDValue TrueVal = Op.getOperand(2); |
| 5186 | SDValue FalseVal = Op.getOperand(3); |
| 5187 | ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FalseVal); |
| 5188 | ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TrueVal); |
| 5189 | |
| 5190 | if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal && |
| 5191 | LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) { |
| 5192 | unsigned TVal = CTVal->getZExtValue(); |
| 5193 | unsigned FVal = CFVal->getZExtValue(); |
| 5194 | unsigned Opcode = 0; |
| 5195 | |
| 5196 | if (TVal == ~FVal) { |
| 5197 | Opcode = ARMISD::CSINV; |
| 5198 | } else if (TVal == ~FVal + 1) { |
| 5199 | Opcode = ARMISD::CSNEG; |
| 5200 | } else if (TVal + 1 == FVal) { |
| 5201 | Opcode = ARMISD::CSINC; |
| 5202 | } else if (TVal == FVal + 1) { |
| 5203 | Opcode = ARMISD::CSINC; |
| 5204 | std::swap(TrueVal, FalseVal); |
| 5205 | std::swap(TVal, FVal); |
| 5206 | CC = ISD::getSetCCInverse(CC, LHS.getValueType()); |
| 5207 | } |
| 5208 | |
| 5209 | if (Opcode) { |
| 5210 | // If one of the constants is cheaper than another, materialise the |
| 5211 | // cheaper one and let the csel generate the other. |
| 5212 | if (Opcode != ARMISD::CSINC && |
| 5213 | HasLowerConstantMaterializationCost(FVal, TVal, Subtarget)) { |
| 5214 | std::swap(TrueVal, FalseVal); |
| 5215 | std::swap(TVal, FVal); |
| 5216 | CC = ISD::getSetCCInverse(CC, LHS.getValueType()); |
| 5217 | } |
| 5218 | |
      // Attempt to use ZR by checking whether TVal is 0, possibly inverting
      // the condition to get there. CSINC is not invertible like the other
      // two (~(~a) == a, -(-a) == a, but (a+1)+1 != a).
| 5222 | if (FVal == 0 && Opcode != ARMISD::CSINC) { |
| 5223 | std::swap(TrueVal, FalseVal); |
| 5224 | std::swap(TVal, FVal); |
| 5225 | CC = ISD::getSetCCInverse(CC, LHS.getValueType()); |
| 5226 | } |
| 5227 | if (TVal == 0) |
| 5228 | TrueVal = DAG.getRegister(ARM::ZR, MVT::i32); |
| 5229 | |
| 5230 | // Drops F's value because we can get it by inverting/negating TVal. |
| 5231 | FalseVal = TrueVal; |
| 5232 | |
| 5233 | SDValue ARMcc; |
| 5234 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 5235 | EVT VT = TrueVal.getValueType(); |
| 5236 | return DAG.getNode(Opcode, dl, VT, TrueVal, FalseVal, ARMcc, Cmp); |
| 5237 | } |
| 5238 | } |
| 5239 | |
| 5240 | if (isUnsupportedFloatingType(LHS.getValueType())) { |
| 5241 | DAG.getTargetLoweringInfo().softenSetCCOperands( |
| 5242 | DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS); |
| 5243 | |
| 5244 | // If softenSetCCOperands only returned one value, we should compare it to |
| 5245 | // zero. |
| 5246 | if (!RHS.getNode()) { |
| 5247 | RHS = DAG.getConstant(0, dl, LHS.getValueType()); |
| 5248 | CC = ISD::SETNE; |
| 5249 | } |
| 5250 | } |
| 5251 | |
| 5252 | if (LHS.getValueType() == MVT::i32) { |
| 5253 | // Try to generate VSEL on ARMv8. |
| 5254 | // The VSEL instruction can't use all the usual ARM condition |
| 5255 | // codes: it only has two bits to select the condition code, so it's |
| 5256 | // constrained to use only GE, GT, VS and EQ. |
| 5257 | // |
| 5258 | // To implement all the various ISD::SETXXX opcodes, we sometimes need to |
| 5259 | // swap the operands of the previous compare instruction (effectively |
| 5260 | // inverting the compare condition, swapping 'less' and 'greater') and |
| 5261 | // sometimes need to swap the operands to the VSEL (which inverts the |
| 5262 | // condition in the sense of firing whenever the previous condition didn't) |
| 5263 | if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 || |
| 5264 | TrueVal.getValueType() == MVT::f32 || |
| 5265 | TrueVal.getValueType() == MVT::f64)) { |
| 5266 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 5267 | if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || |
| 5268 | CondCode == ARMCC::VC || CondCode == ARMCC::NE) { |
| 5269 | CC = ISD::getSetCCInverse(CC, LHS.getValueType()); |
| 5270 | std::swap(TrueVal, FalseVal); |
| 5271 | } |
| 5272 | } |
| 5273 | |
| 5274 | SDValue ARMcc; |
| 5275 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 5276 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
    // Choose GE over PL, which vsel does not support.
| 5278 | if (cast<ConstantSDNode>(ARMcc)->getZExtValue() == ARMCC::PL) |
| 5279 | ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32); |
| 5280 | return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); |
| 5281 | } |
| 5282 | |
| 5283 | ARMCC::CondCodes CondCode, CondCode2; |
| 5284 | FPCCToARMCC(CC, CondCode, CondCode2); |
| 5285 | |
| 5286 | // Normalize the fp compare. If RHS is zero we prefer to keep it there so we |
| 5287 | // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we |
| 5288 | // must use VSEL (limited condition codes), due to not having conditional f16 |
| 5289 | // moves. |
| 5290 | if (Subtarget->hasFPARMv8Base() && |
| 5291 | !(isFloatingPointZero(RHS) && TrueVal.getValueType() != MVT::f16) && |
| 5292 | (TrueVal.getValueType() == MVT::f16 || |
| 5293 | TrueVal.getValueType() == MVT::f32 || |
| 5294 | TrueVal.getValueType() == MVT::f64)) { |
| 5295 | bool swpCmpOps = false; |
| 5296 | bool swpVselOps = false; |
| 5297 | checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); |
| 5298 | |
| 5299 | if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || |
| 5300 | CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { |
| 5301 | if (swpCmpOps) |
| 5302 | std::swap(LHS, RHS); |
| 5303 | if (swpVselOps) |
| 5304 | std::swap(TrueVal, FalseVal); |
| 5305 | } |
| 5306 | } |
| 5307 | |
| 5308 | SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); |
| 5309 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); |
| 5310 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 5311 | SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); |
| 5312 | if (CondCode2 != ARMCC::AL) { |
| 5313 | SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32); |
| 5314 | // FIXME: Needs another CMP because flag can have but one use. |
| 5315 | SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); |
| 5316 | Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG); |
| 5317 | } |
| 5318 | return Result; |
| 5319 | } |
| 5320 | |
| 5321 | /// canChangeToInt - Given the fp compare operand, return true if it is suitable |
| 5322 | /// to morph to an integer compare sequence. |
| 5323 | static bool canChangeToInt(SDValue Op, bool &SeenZero, |
| 5324 | const ARMSubtarget *Subtarget) { |
| 5325 | SDNode *N = Op.getNode(); |
| 5326 | if (!N->hasOneUse()) |
| 5327 | // Otherwise it requires moving the value from fp to integer registers. |
| 5328 | return false; |
| 5329 | if (!N->getNumValues()) |
| 5330 | return false; |
| 5331 | EVT VT = Op.getValueType(); |
| 5332 | if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) |
| 5333 | // f32 case is generally profitable. f64 case only makes sense when vcmpe + |
| 5334 | // vmrs are very slow, e.g. cortex-a8. |
| 5335 | return false; |
| 5336 | |
| 5337 | if (isFloatingPointZero(Op)) { |
| 5338 | SeenZero = true; |
| 5339 | return true; |
| 5340 | } |
| 5341 | return ISD::isNormalLoad(N); |
| 5342 | } |
| 5343 | |
| 5344 | static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { |
| 5345 | if (isFloatingPointZero(Op)) |
| 5346 | return DAG.getConstant(0, SDLoc(Op), MVT::i32); |
| 5347 | |
| 5348 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) |
| 5349 | return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(), |
| 5350 | Ld->getPointerInfo(), Ld->getAlignment(), |
| 5351 | Ld->getMemOperand()->getFlags()); |
| 5352 | |
| 5353 | llvm_unreachable("Unknown VFP cmp argument!" ); |
| 5354 | } |
| 5355 | |
| 5356 | static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, |
| 5357 | SDValue &RetVal1, SDValue &RetVal2) { |
| 5358 | SDLoc dl(Op); |
| 5359 | |
| 5360 | if (isFloatingPointZero(Op)) { |
| 5361 | RetVal1 = DAG.getConstant(0, dl, MVT::i32); |
| 5362 | RetVal2 = DAG.getConstant(0, dl, MVT::i32); |
| 5363 | return; |
| 5364 | } |
| 5365 | |
| 5366 | if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { |
| 5367 | SDValue Ptr = Ld->getBasePtr(); |
| 5368 | RetVal1 = |
| 5369 | DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(), |
| 5370 | Ld->getAlignment(), Ld->getMemOperand()->getFlags()); |
| 5371 | |
| 5372 | EVT PtrType = Ptr.getValueType(); |
| 5373 | unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); |
| 5374 | SDValue NewPtr = DAG.getNode(ISD::ADD, dl, |
| 5375 | PtrType, Ptr, DAG.getConstant(4, dl, PtrType)); |
| 5376 | RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr, |
| 5377 | Ld->getPointerInfo().getWithOffset(4), NewAlign, |
| 5378 | Ld->getMemOperand()->getFlags()); |
| 5379 | return; |
| 5380 | } |
| 5381 | |
| 5382 | llvm_unreachable("Unknown VFP cmp argument!" ); |
| 5383 | } |
| 5384 | |
| 5385 | /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some |
| 5386 | /// f32 and even f64 comparisons to integer ones. |
| 5387 | SDValue |
| 5388 | ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { |
| 5389 | SDValue Chain = Op.getOperand(0); |
| 5390 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); |
| 5391 | SDValue LHS = Op.getOperand(2); |
| 5392 | SDValue RHS = Op.getOperand(3); |
| 5393 | SDValue Dest = Op.getOperand(4); |
| 5394 | SDLoc dl(Op); |
| 5395 | |
| 5396 | bool LHSSeenZero = false; |
| 5397 | bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget); |
| 5398 | bool RHSSeenZero = false; |
| 5399 | bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget); |
| 5400 | if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { |
| 5401 | // If unsafe fp math optimization is enabled and there are no other uses of |
| 5402 | // the CMP operands, and the condition code is EQ or NE, we can optimize it |
| 5403 | // to an integer comparison. |
| 5404 | if (CC == ISD::SETOEQ) |
| 5405 | CC = ISD::SETEQ; |
| 5406 | else if (CC == ISD::SETUNE) |
| 5407 | CC = ISD::SETNE; |
| 5408 | |
| 5409 | SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32); |
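    // One side of the compare is known to be +/-0.0 here, so masking off the
    // sign bits makes the integer equality exact: |x| == 0 iff x is a signed
    // zero.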
| 5410 | SDValue ARMcc; |
| 5411 | if (LHS.getValueType() == MVT::f32) { |
| 5412 | LHS = DAG.getNode(ISD::AND, dl, MVT::i32, |
| 5413 | bitcastf32Toi32(LHS, DAG), Mask); |
| 5414 | RHS = DAG.getNode(ISD::AND, dl, MVT::i32, |
| 5415 | bitcastf32Toi32(RHS, DAG), Mask); |
| 5416 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 5417 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 5418 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, |
| 5419 | Chain, Dest, ARMcc, CCR, Cmp); |
| 5420 | } |
| 5421 | |
| 5422 | SDValue LHS1, LHS2; |
| 5423 | SDValue RHS1, RHS2; |
| 5424 | expandf64Toi32(LHS, DAG, LHS1, LHS2); |
| 5425 | expandf64Toi32(RHS, DAG, RHS1, RHS2); |
| 5426 | LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask); |
| 5427 | RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask); |
| 5428 | ARMCC::CondCodes CondCode = IntCCToARMCC(CC); |
| 5429 | ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); |
| 5430 | SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); |
| 5431 | SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; |
| 5432 | return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops); |
| 5433 | } |
| 5434 | |
| 5435 | return SDValue(); |
| 5436 | } |
| 5437 | |
| 5438 | SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { |
| 5439 | SDValue Chain = Op.getOperand(0); |
| 5440 | SDValue Cond = Op.getOperand(1); |
| 5441 | SDValue Dest = Op.getOperand(2); |
| 5442 | SDLoc dl(Op); |
| 5443 | |
| 5444 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
| 5445 | // instruction. |
| 5446 | unsigned Opc = Cond.getOpcode(); |
| 5447 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && |
| 5448 | !Subtarget->isThumb1Only(); |
| 5449 | if (Cond.getResNo() == 1 && |
| 5450 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 5451 | Opc == ISD::USUBO || OptimizeMul)) { |
| 5452 | // Only lower legal XALUO ops. |
| 5453 | if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) |
| 5454 | return SDValue(); |
| 5455 | |
| 5456 | // The actual operation with overflow check. |
| 5457 | SDValue Value, OverflowCmp; |
| 5458 | SDValue ARMcc; |
| 5459 | std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); |
| 5460 | |
| 5461 | // Reverse the condition code. |
| 5462 | ARMCC::CondCodes CondCode = |
| 5463 | (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue(); |
| 5464 | CondCode = ARMCC::getOppositeCondition(CondCode); |
| 5465 | ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32); |
| 5466 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 5467 | |
| 5468 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR, |
| 5469 | OverflowCmp); |
| 5470 | } |
| 5471 | |
| 5472 | return SDValue(); |
| 5473 | } |
| 5474 | |
| 5475 | SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { |
| 5476 | SDValue Chain = Op.getOperand(0); |
| 5477 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); |
| 5478 | SDValue LHS = Op.getOperand(2); |
| 5479 | SDValue RHS = Op.getOperand(3); |
| 5480 | SDValue Dest = Op.getOperand(4); |
| 5481 | SDLoc dl(Op); |
| 5482 | |
| 5483 | if (isUnsupportedFloatingType(LHS.getValueType())) { |
| 5484 | DAG.getTargetLoweringInfo().softenSetCCOperands( |
| 5485 | DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS); |
| 5486 | |
| 5487 | // If softenSetCCOperands only returned one value, we should compare it to |
| 5488 | // zero. |
| 5489 | if (!RHS.getNode()) { |
| 5490 | RHS = DAG.getConstant(0, dl, LHS.getValueType()); |
| 5491 | CC = ISD::SETNE; |
| 5492 | } |
| 5493 | } |
| 5494 | |
| 5495 | // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch |
| 5496 | // instruction. |
| 5497 | unsigned Opc = LHS.getOpcode(); |
| 5498 | bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) && |
| 5499 | !Subtarget->isThumb1Only(); |
| 5500 | if (LHS.getResNo() == 1 && (isOneConstant(RHS) || isNullConstant(RHS)) && |
| 5501 | (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || |
| 5502 | Opc == ISD::USUBO || OptimizeMul) && |
| 5503 | (CC == ISD::SETEQ || CC == ISD::SETNE)) { |
| 5504 | // Only lower legal XALUO ops. |
| 5505 | if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0))) |
| 5506 | return SDValue(); |
| 5507 | |
| 5508 | // The actual operation with overflow check. |
| 5509 | SDValue Value, OverflowCmp; |
| 5510 | SDValue ARMcc; |
| 5511 | std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.getValue(0), DAG, ARMcc); |
| 5512 | |
| 5513 | if ((CC == ISD::SETNE) != isOneConstant(RHS)) { |
| 5514 | // Reverse the condition code. |
| 5515 | ARMCC::CondCodes CondCode = |
| 5516 | (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue(); |
| 5517 | CondCode = ARMCC::getOppositeCondition(CondCode); |
| 5518 | ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32); |
| 5519 | } |
| 5520 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 5521 | |
| 5522 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR, |
| 5523 | OverflowCmp); |
| 5524 | } |
| 5525 | |
| 5526 | if (LHS.getValueType() == MVT::i32) { |
| 5527 | SDValue ARMcc; |
| 5528 | SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); |
| 5529 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 5530 | return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, |
| 5531 | Chain, Dest, ARMcc, CCR, Cmp); |
| 5532 | } |
| 5533 | |
| 5534 | if (getTargetMachine().Options.UnsafeFPMath && |
| 5535 | (CC == ISD::SETEQ || CC == ISD::SETOEQ || |
| 5536 | CC == ISD::SETNE || CC == ISD::SETUNE)) { |
| 5537 | if (SDValue Result = OptimizeVFPBrcond(Op, DAG)) |
| 5538 | return Result; |
| 5539 | } |
| 5540 | |
| 5541 | ARMCC::CondCodes CondCode, CondCode2; |
| 5542 | FPCCToARMCC(CC, CondCode, CondCode2); |
| 5543 | |
| 5544 | SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); |
| 5545 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); |
| 5546 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 5547 | SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); |
| 5548 | SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; |
| 5549 | SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); |
| 5550 | if (CondCode2 != ARMCC::AL) { |
| 5551 | ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32); |
| 5552 | SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; |
| 5553 | Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); |
| 5554 | } |
| 5555 | return Res; |
| 5556 | } |
| 5557 | |
| 5558 | SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { |
| 5559 | SDValue Chain = Op.getOperand(0); |
| 5560 | SDValue Table = Op.getOperand(1); |
| 5561 | SDValue Index = Op.getOperand(2); |
| 5562 | SDLoc dl(Op); |
| 5563 | |
| 5564 | EVT PTy = getPointerTy(DAG.getDataLayout()); |
| 5565 | JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); |
| 5566 | SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); |
| 5567 | Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI); |
| 5568 | Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy)); |
| 5569 | SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Index); |
| 5570 | if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) { |
| 5571 | // Thumb2 and ARMv8-M use a two-level jump. That is, it jumps into the jump table |
| 5572 | // which does another jump to the destination. This also makes it easier |
| 5573 | // to translate it to TBB / TBH later (Thumb2 only). |
| 5574 | // FIXME: This might not work if the function is extremely large. |
| 5575 | return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, |
| 5576 | Addr, Op.getOperand(2), JTI); |
| 5577 | } |
| 5578 | if (isPositionIndependent() || Subtarget->isROPI()) { |
| 5579 | Addr = |
| 5580 | DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, |
| 5581 | MachinePointerInfo::getJumpTable(DAG.getMachineFunction())); |
| 5582 | Chain = Addr.getValue(1); |
| 5583 | Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Addr); |
| 5584 | return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); |
| 5585 | } else { |
| 5586 | Addr = |
| 5587 | DAG.getLoad(PTy, dl, Chain, Addr, |
| 5588 | MachinePointerInfo::getJumpTable(DAG.getMachineFunction())); |
| 5589 | Chain = Addr.getValue(1); |
| 5590 | return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); |
| 5591 | } |
| 5592 | } |
| 5593 | |
| 5594 | static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { |
| 5595 | EVT VT = Op.getValueType(); |
| 5596 | SDLoc dl(Op); |
| 5597 | |
| 5598 | if (Op.getValueType().getVectorElementType() == MVT::i32) { |
| 5599 | if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32) |
| 5600 | return Op; |
| 5601 | return DAG.UnrollVectorOp(Op.getNode()); |
| 5602 | } |
| 5603 | |
| 5604 | const bool HasFullFP16 = |
| 5605 | static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16(); |
| 5606 | |
| 5607 | EVT NewTy; |
| 5608 | const EVT OpTy = Op.getOperand(0).getValueType(); |
| 5609 | if (OpTy == MVT::v4f32) |
| 5610 | NewTy = MVT::v4i32; |
| 5611 | else if (OpTy == MVT::v4f16 && HasFullFP16) |
| 5612 | NewTy = MVT::v4i16; |
| 5613 | else if (OpTy == MVT::v8f16 && HasFullFP16) |
| 5614 | NewTy = MVT::v8i16; |
| 5615 | else |
| 5616 | llvm_unreachable("Invalid type for custom lowering!" ); |
| 5617 | |
| 5618 | if (VT != MVT::v4i16 && VT != MVT::v8i16) |
| 5619 | return DAG.UnrollVectorOp(Op.getNode()); |
| 5620 | |
| 5621 | Op = DAG.getNode(Op.getOpcode(), dl, NewTy, Op.getOperand(0)); |
| 5622 | return DAG.getNode(ISD::TRUNCATE, dl, VT, Op); |
| 5623 | } |
| 5624 | |
| 5625 | SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { |
| 5626 | EVT VT = Op.getValueType(); |
| 5627 | if (VT.isVector()) |
| 5628 | return LowerVectorFP_TO_INT(Op, DAG); |
| 5629 | |
| 5630 | bool IsStrict = Op->isStrictFPOpcode(); |
| 5631 | SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); |
| 5632 | |
| 5633 | if (isUnsupportedFloatingType(SrcVal.getValueType())) { |
| 5634 | RTLIB::Libcall LC; |
| 5635 | if (Op.getOpcode() == ISD::FP_TO_SINT || |
| 5636 | Op.getOpcode() == ISD::STRICT_FP_TO_SINT) |
| 5637 | LC = RTLIB::getFPTOSINT(SrcVal.getValueType(), |
| 5638 | Op.getValueType()); |
| 5639 | else |
| 5640 | LC = RTLIB::getFPTOUINT(SrcVal.getValueType(), |
| 5641 | Op.getValueType()); |
| 5642 | SDLoc Loc(Op); |
| 5643 | MakeLibCallOptions CallOptions; |
| 5644 | SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue(); |
| 5645 | SDValue Result; |
| 5646 | std::tie(Result, Chain) = makeLibCall(DAG, LC, Op.getValueType(), SrcVal, |
| 5647 | CallOptions, Loc, Chain); |
| 5648 | return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result; |
| 5649 | } |
| 5650 | |
| 5651 | // FIXME: Remove this when we have strict fp instruction selection patterns |
| 5652 | if (IsStrict) { |
| 5653 | SDLoc Loc(Op); |
| 5654 | SDValue Result = |
| 5655 | DAG.getNode(Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? ISD::FP_TO_SINT |
| 5656 | : ISD::FP_TO_UINT, |
| 5657 | Loc, Op.getValueType(), SrcVal); |
| 5658 | return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc); |
| 5659 | } |
| 5660 | |
| 5661 | return Op; |
| 5662 | } |
| 5663 | |
| 5664 | static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { |
| 5665 | EVT VT = Op.getValueType(); |
| 5666 | SDLoc dl(Op); |
| 5667 | |
| 5668 | if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) { |
| 5669 | if (VT.getVectorElementType() == MVT::f32) |
| 5670 | return Op; |
| 5671 | return DAG.UnrollVectorOp(Op.getNode()); |
| 5672 | } |
| 5673 | |
| 5674 | assert((Op.getOperand(0).getValueType() == MVT::v4i16 || |
| 5675 | Op.getOperand(0).getValueType() == MVT::v8i16) && |
| 5676 | "Invalid type for custom lowering!" ); |
| 5677 | |
| 5678 | const bool HasFullFP16 = |
| 5679 | static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16(); |
| 5680 | |
| 5681 | EVT DestVecType; |
| 5682 | if (VT == MVT::v4f32) |
| 5683 | DestVecType = MVT::v4i32; |
| 5684 | else if (VT == MVT::v4f16 && HasFullFP16) |
| 5685 | DestVecType = MVT::v4i16; |
| 5686 | else if (VT == MVT::v8f16 && HasFullFP16) |
| 5687 | DestVecType = MVT::v8i16; |
| 5688 | else |
| 5689 | return DAG.UnrollVectorOp(Op.getNode()); |
| 5690 | |
| 5691 | unsigned CastOpc; |
| 5692 | unsigned Opc; |
| 5693 | switch (Op.getOpcode()) { |
| 5694 | default: llvm_unreachable("Invalid opcode!" ); |
| 5695 | case ISD::SINT_TO_FP: |
| 5696 | CastOpc = ISD::SIGN_EXTEND; |
| 5697 | Opc = ISD::SINT_TO_FP; |
| 5698 | break; |
| 5699 | case ISD::UINT_TO_FP: |
| 5700 | CastOpc = ISD::ZERO_EXTEND; |
| 5701 | Opc = ISD::UINT_TO_FP; |
| 5702 | break; |
| 5703 | } |
| 5704 | |
| 5705 | Op = DAG.getNode(CastOpc, dl, DestVecType, Op.getOperand(0)); |
| 5706 | return DAG.getNode(Opc, dl, VT, Op); |
| 5707 | } |
| 5708 | |
| 5709 | SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { |
| 5710 | EVT VT = Op.getValueType(); |
| 5711 | if (VT.isVector()) |
| 5712 | return LowerVectorINT_TO_FP(Op, DAG); |
| 5713 | if (isUnsupportedFloatingType(VT)) { |
| 5714 | RTLIB::Libcall LC; |
| 5715 | if (Op.getOpcode() == ISD::SINT_TO_FP) |
| 5716 | LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), |
| 5717 | Op.getValueType()); |
| 5718 | else |
| 5719 | LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), |
| 5720 | Op.getValueType()); |
| 5721 | MakeLibCallOptions CallOptions; |
| 5722 | return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0), |
| 5723 | CallOptions, SDLoc(Op)).first; |
| 5724 | } |
| 5725 | |
| 5726 | return Op; |
| 5727 | } |
| 5728 | |
| 5729 | SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { |
| 5730 | // Implement fcopysign with a fabs and a conditional fneg. |
| 5731 | SDValue Tmp0 = Op.getOperand(0); |
| 5732 | SDValue Tmp1 = Op.getOperand(1); |
| 5733 | SDLoc dl(Op); |
| 5734 | EVT VT = Op.getValueType(); |
| 5735 | EVT SrcVT = Tmp1.getValueType(); |
| 5736 | bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || |
| 5737 | Tmp0.getOpcode() == ARMISD::VMOVDRR; |
| 5738 | bool UseNEON = !InGPR && Subtarget->hasNEON(); |
| 5739 | |
| 5740 | if (UseNEON) { |
| 5741 | // Use VBSL to copy the sign bit. |
| 5742 | unsigned EncodedVal = ARM_AM::createVMOVModImm(0x6, 0x80); |
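    // A note on the encoding: cmode 0x6 places the immediate byte in the top
    // byte of each 32-bit lane, so 0x80 expands to the per-lane sign-bit mask
    // 0x80000000.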
| 5743 | SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, |
| 5744 | DAG.getTargetConstant(EncodedVal, dl, MVT::i32)); |
| 5745 | EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; |
| 5746 | if (VT == MVT::f64) |
| 5747 | Mask = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT, |
| 5748 | DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), |
| 5749 | DAG.getConstant(32, dl, MVT::i32)); |
| 5750 | else /*if (VT == MVT::f32)*/ |
| 5751 | Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); |
| 5752 | if (SrcVT == MVT::f32) { |
| 5753 | Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); |
| 5754 | if (VT == MVT::f64) |
| 5755 | Tmp1 = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT, |
| 5756 | DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), |
| 5757 | DAG.getConstant(32, dl, MVT::i32)); |
| 5758 | } else if (VT == MVT::f32) |
| 5759 | Tmp1 = DAG.getNode(ARMISD::VSHRuIMM, dl, MVT::v1i64, |
| 5760 | DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), |
| 5761 | DAG.getConstant(32, dl, MVT::i32)); |
| 5762 | Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); |
| 5763 | Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); |
| 5764 | |
| 5765 | SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff), |
| 5766 | dl, MVT::i32); |
| 5767 | AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); |
| 5768 | SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, |
| 5769 | DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); |
| 5770 | |
| 5771 | SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, |
| 5772 | DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), |
| 5773 | DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); |
| 5774 | if (VT == MVT::f32) { |
| 5775 | Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); |
| 5776 | Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, |
| 5777 | DAG.getConstant(0, dl, MVT::i32)); |
| 5778 | } else { |
| 5779 | Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); |
| 5780 | } |
| 5781 | |
| 5782 | return Res; |
| 5783 | } |
| 5784 | |
| 5785 | // Bitcast operand 1 to i32. |
| 5786 | if (SrcVT == MVT::f64) |
| 5787 | Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), |
| 5788 | Tmp1).getValue(1); |
| 5789 | Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); |
| 5790 | |
| 5791 | // Or in the signbit with integer operations. |
| 5792 | SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32); |
| 5793 | SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32); |
| 5794 | Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); |
| 5795 | if (VT == MVT::f32) { |
| 5796 | Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, |
| 5797 | DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); |
| 5798 | return DAG.getNode(ISD::BITCAST, dl, MVT::f32, |
| 5799 | DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); |
| 5800 | } |
| 5801 | |
| 5802 | // f64: Or the high part with signbit and then combine two parts. |
| 5803 | Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), |
| 5804 | Tmp0); |
| 5805 | SDValue Lo = Tmp0.getValue(0); |
| 5806 | SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); |
| 5807 | Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); |
| 5808 | return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); |
| 5809 | } |
| 5810 | |
SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
| 5812 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5813 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 5814 | MFI.setReturnAddressIsTaken(true); |
| 5815 | |
| 5816 | if (verifyReturnAddressArgumentIsConstant(Op, DAG)) |
| 5817 | return SDValue(); |
| 5818 | |
| 5819 | EVT VT = Op.getValueType(); |
| 5820 | SDLoc dl(Op); |
| 5821 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
| 5822 | if (Depth) { |
| 5823 | SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); |
| 5824 | SDValue Offset = DAG.getConstant(4, dl, MVT::i32); |
| 5825 | return DAG.getLoad(VT, dl, DAG.getEntryNode(), |
| 5826 | DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), |
| 5827 | MachinePointerInfo()); |
| 5828 | } |
| 5829 | |
| 5830 | // Return LR, which contains the return address. Mark it an implicit live-in. |
| 5831 | unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); |
| 5832 | return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); |
| 5833 | } |
| 5834 | |
| 5835 | SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { |
| 5836 | const ARMBaseRegisterInfo &ARI = |
| 5837 | *static_cast<const ARMBaseRegisterInfo*>(RegInfo); |
| 5838 | MachineFunction &MF = DAG.getMachineFunction(); |
| 5839 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 5840 | MFI.setFrameAddressIsTaken(true); |
| 5841 | |
| 5842 | EVT VT = Op.getValueType(); |
| 5843 | SDLoc dl(Op); // FIXME probably not meaningful |
| 5844 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
| 5845 | Register FrameReg = ARI.getFrameRegister(MF); |
| 5846 | SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); |
| 5847 | while (Depth--) |
| 5848 | FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, |
| 5849 | MachinePointerInfo()); |
| 5850 | return FrameAddr; |
| 5851 | } |
| 5852 | |
| 5853 | // FIXME? Maybe this could be a TableGen attribute on some registers and |
| 5854 | // this table could be generated automatically from RegInfo. |
| 5855 | Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT, |
| 5856 | const MachineFunction &MF) const { |
| 5857 | Register Reg = StringSwitch<unsigned>(RegName) |
| 5858 | .Case("sp" , ARM::SP) |
| 5859 | .Default(0); |
| 5860 | if (Reg) |
| 5861 | return Reg; |
  report_fatal_error(Twine("Invalid register name \"" + StringRef(RegName) +
                           "\"."));
| 5864 | } |
| 5865 | |
// The result is a 64-bit value, so split it into two 32-bit values and
// return them as a pair.
| 5868 | static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 5869 | SelectionDAG &DAG) { |
| 5870 | SDLoc DL(N); |
| 5871 | |
| 5872 | // This function is only supposed to be called for i64 type destination. |
| 5873 | assert(N->getValueType(0) == MVT::i64 |
| 5874 | && "ExpandREAD_REGISTER called for non-i64 type result." ); |
| 5875 | |
| 5876 | SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL, |
| 5877 | DAG.getVTList(MVT::i32, MVT::i32, MVT::Other), |
| 5878 | N->getOperand(0), |
| 5879 | N->getOperand(1)); |
| 5880 | |
| 5881 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0), |
| 5882 | Read.getValue(1))); |
| 5883 | Results.push_back(Read.getOperand(0)); |
| 5884 | } |
| 5885 | |
| 5886 | /// \p BC is a bitcast that is about to be turned into a VMOVDRR. |
| 5887 | /// When \p DstVT, the destination type of \p BC, is on the vector |
/// register bank and the source of the bitcast, \p Op, operates on the same
/// bank, it might be possible to combine them, such that everything stays on
/// the vector register bank.
/// \return The node that would replace \p BC, if the combine
/// is possible.
| 5893 | static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, |
| 5894 | SelectionDAG &DAG) { |
| 5895 | SDValue Op = BC->getOperand(0); |
| 5896 | EVT DstVT = BC->getValueType(0); |
| 5897 | |
| 5898 | // The only vector instruction that can produce a scalar (remember, |
| 5899 | // since the bitcast was about to be turned into VMOVDRR, the source |
| 5900 | // type is i64) from a vector is EXTRACT_VECTOR_ELT. |
| 5901 | // Moreover, we can do this combine only if there is one use. |
// Finally, if the destination type is not a vector, there is not
// much point in forcing everything on the vector bank.
| 5904 | if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 5905 | !Op.hasOneUse()) |
| 5906 | return SDValue(); |
| 5907 | |
| 5908 | // If the index is not constant, we will introduce an additional |
| 5909 | // multiply that will stick. |
| 5910 | // Give up in that case. |
| 5911 | ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1)); |
| 5912 | if (!Index) |
| 5913 | return SDValue(); |
| 5914 | unsigned DstNumElt = DstVT.getVectorNumElements(); |
| 5915 | |
| 5916 | // Compute the new index. |
| 5917 | const APInt &APIntIndex = Index->getAPIntValue(); |
| 5918 | APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt); |
| 5919 | NewIndex *= APIntIndex; |
| 5920 | // Check if the new constant index fits into i32. |
| 5921 | if (NewIndex.getBitWidth() > 32) |
| 5922 | return SDValue(); |
| 5923 | |
| 5924 | // vMTy bitcast(i64 extractelt vNi64 src, i32 index) -> |
| 5925 | // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M) |
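  // For illustration: v2f32 (bitcast (i64 extractelt v2i64 src, 1)) becomes
  // v2f32 (extract_subvector (v4f32 bitcast v2i64 src), 2), with the index
  // scaled by DstNumElt == 2.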
| 5926 | SDLoc dl(Op); |
  SDValue ExtractSrc = Op.getOperand(0);
| 5928 | EVT VecVT = EVT::getVectorVT( |
| 5929 | *DAG.getContext(), DstVT.getScalarType(), |
| 5930 | ExtractSrc.getValueType().getVectorNumElements() * DstNumElt); |
| 5931 | SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc); |
| 5932 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast, |
| 5933 | DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32)); |
| 5934 | } |
| 5935 | |
| 5936 | /// ExpandBITCAST - If the target supports VFP, this function is called to |
| 5937 | /// expand a bit convert where either the source or destination type is i64 to |
| 5938 | /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 |
| 5939 | /// operand type is illegal (e.g., v2f32 for a target that doesn't support |
| 5940 | /// vectors), since the legalizer won't know what to do with that. |
| 5941 | SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG, |
| 5942 | const ARMSubtarget *Subtarget) const { |
| 5943 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 5944 | SDLoc dl(N); |
| 5945 | SDValue Op = N->getOperand(0); |
| 5946 | |
| 5947 | // This function is only supposed to be called for i16 and i64 types, either |
| 5948 | // as the source or destination of the bit convert. |
| 5949 | EVT SrcVT = Op.getValueType(); |
| 5950 | EVT DstVT = N->getValueType(0); |
| 5951 | |
| 5952 | if ((SrcVT == MVT::i16 || SrcVT == MVT::i32) && |
| 5953 | (DstVT == MVT::f16 || DstVT == MVT::bf16)) |
| 5954 | return MoveToHPR(SDLoc(N), DAG, MVT::i32, DstVT.getSimpleVT(), |
| 5955 | DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), MVT::i32, Op)); |
| 5956 | |
| 5957 | if ((DstVT == MVT::i16 || DstVT == MVT::i32) && |
| 5958 | (SrcVT == MVT::f16 || SrcVT == MVT::bf16)) |
| 5959 | return DAG.getNode( |
| 5960 | ISD::TRUNCATE, SDLoc(N), DstVT, |
| 5961 | MoveFromHPR(SDLoc(N), DAG, MVT::i32, SrcVT.getSimpleVT(), Op)); |
| 5962 | |
| 5963 | if (!(SrcVT == MVT::i64 || DstVT == MVT::i64)) |
| 5964 | return SDValue(); |
| 5965 | |
| 5966 | // Turn i64->f64 into VMOVDRR. |
| 5967 | if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { |
| 5968 | // Do not force values to GPRs (this is what VMOVDRR does for the inputs) |
| 5969 | // if we can combine the bitcast with its source. |
| 5970 | if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG)) |
| 5971 | return Val; |
| 5972 | |
| 5973 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, |
| 5974 | DAG.getConstant(0, dl, MVT::i32)); |
| 5975 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, |
| 5976 | DAG.getConstant(1, dl, MVT::i32)); |
| 5977 | return DAG.getNode(ISD::BITCAST, dl, DstVT, |
| 5978 | DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); |
| 5979 | } |
| 5980 | |
| 5981 | // Turn f64->i64 into VMOVRRD. |
| 5982 | if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { |
| 5983 | SDValue Cvt; |
| 5984 | if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() && |
| 5985 | SrcVT.getVectorNumElements() > 1) |
| 5986 | Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 5987 | DAG.getVTList(MVT::i32, MVT::i32), |
| 5988 | DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op)); |
| 5989 | else |
| 5990 | Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, |
| 5991 | DAG.getVTList(MVT::i32, MVT::i32), Op); |
| 5992 | // Merge the pieces into a single i64 value. |
| 5993 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); |
| 5994 | } |
| 5995 | |
| 5996 | return SDValue(); |
| 5997 | } |
| 5998 | |
| 5999 | /// getZeroVector - Returns a vector of specified type with all zero elements. |
| 6000 | /// Zero vectors are used to represent vector negation and in those cases |
| 6001 | /// will be implemented with the NEON VNEG instruction. However, VNEG does |
| 6002 | /// not support i64 elements, so sometimes the zero vectors will need to be |
| 6003 | /// explicitly constructed. Regardless, use a canonical VMOV to create the |
| 6004 | /// zero vector. |
| 6005 | static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) { |
| 6006 | assert(VT.isVector() && "Expected a vector type" ); |
| 6007 | // The canonical modified immediate encoding of a zero vector is....0! |
| 6008 | SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32); |
| 6009 | EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
| 6010 | SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); |
| 6011 | return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); |
| 6012 | } |
| 6013 | |
/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
| 6016 | SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, |
| 6017 | SelectionDAG &DAG) const { |
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
| 6019 | EVT VT = Op.getValueType(); |
| 6020 | unsigned VTBits = VT.getSizeInBits(); |
| 6021 | SDLoc dl(Op); |
| 6022 | SDValue ShOpLo = Op.getOperand(0); |
| 6023 | SDValue ShOpHi = Op.getOperand(1); |
| 6024 | SDValue ShAmt = Op.getOperand(2); |
| 6025 | SDValue ARMcc; |
| 6026 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 6027 | unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; |
| 6028 | |
| 6029 | assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); |
| 6030 | |
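  // Compute both a "small shift" result (valid when ShAmt < VTBits) and a
  // "big shift" result (valid when ShAmt >= VTBits), then select between
  // them with a conditional move on ExtraShAmt = ShAmt - VTBits:
  //   Lo = ShAmt < VTBits ? (ShOpLo >>u ShAmt) | (ShOpHi << (VTBits - ShAmt))
  //                       : ShOpHi >> ExtraShAmt    // SRA or SRL, per Opc
  //   Hi = ShAmt < VTBits ? ShOpHi >> ShAmt
  //                       : (SRA ? ShOpHi >>s (VTBits - 1) : 0)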
| 6031 | SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, |
| 6032 | DAG.getConstant(VTBits, dl, MVT::i32), ShAmt); |
| 6033 | SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); |
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
| 6036 | SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); |
| 6037 | SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); |
| 6038 | SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); |
| 6039 | SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), |
| 6040 | ISD::SETGE, ARMcc, DAG, dl); |
| 6041 | SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift, |
| 6042 | ARMcc, CCR, CmpLo); |
| 6043 | |
| 6044 | SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); |
| 6045 | SDValue HiBigShift = Opc == ISD::SRA |
| 6046 | ? DAG.getNode(Opc, dl, VT, ShOpHi, |
| 6047 | DAG.getConstant(VTBits - 1, dl, VT)) |
| 6048 | : DAG.getConstant(0, dl, VT); |
| 6049 | SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), |
| 6050 | ISD::SETGE, ARMcc, DAG, dl); |
| 6051 | SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift, |
| 6052 | ARMcc, CCR, CmpHi); |
| 6053 | |
| 6054 | SDValue Ops[2] = { Lo, Hi }; |
| 6055 | return DAG.getMergeValues(Ops, dl); |
| 6056 | } |
| 6057 | |
/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
| 6060 | SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, |
| 6061 | SelectionDAG &DAG) const { |
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
| 6063 | EVT VT = Op.getValueType(); |
| 6064 | unsigned VTBits = VT.getSizeInBits(); |
| 6065 | SDLoc dl(Op); |
| 6066 | SDValue ShOpLo = Op.getOperand(0); |
| 6067 | SDValue ShOpHi = Op.getOperand(1); |
| 6068 | SDValue ShAmt = Op.getOperand(2); |
| 6069 | SDValue ARMcc; |
| 6070 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 6071 | |
| 6072 | assert(Op.getOpcode() == ISD::SHL_PARTS); |
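  // Compute both a "small shift" result (valid when ShAmt < VTBits) and a
  // "big shift" result (valid when ShAmt >= VTBits), then select between
  // them with a conditional move on ExtraShAmt = ShAmt - VTBits:
  //   Hi = ShAmt < VTBits ? (ShOpLo >>u (VTBits - ShAmt)) | (ShOpHi << ShAmt)
  //                       : ShOpLo << ExtraShAmt
  //   Lo = ShAmt < VTBits ? ShOpLo << ShAmt : 0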
| 6073 | SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, |
| 6074 | DAG.getConstant(VTBits, dl, MVT::i32), ShAmt); |
| 6075 | SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); |
| 6076 | SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); |
| 6077 | SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); |
| 6078 | |
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
| 6081 | SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); |
| 6082 | SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), |
| 6083 | ISD::SETGE, ARMcc, DAG, dl); |
| 6084 | SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift, |
| 6085 | ARMcc, CCR, CmpHi); |
| 6086 | |
| 6087 | SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), |
| 6088 | ISD::SETGE, ARMcc, DAG, dl); |
| 6089 | SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); |
| 6090 | SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, |
| 6091 | DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo); |
| 6092 | |
| 6093 | SDValue Ops[2] = { Lo, Hi }; |
| 6094 | return DAG.getMergeValues(Ops, dl); |
| 6095 | } |
| 6096 | |
| 6097 | SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, |
| 6098 | SelectionDAG &DAG) const { |
| 6099 | // The rounding mode is in bits 23:22 of the FPSCR. |
| 6100 | // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 |
  // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3)
  // so that the shift and the AND get folded into a bitfield extract.
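  // e.g. FPSCR RMode = 0b00 (round to nearest) yields
  // ((0 + (1 << 22)) >> 22) & 3 = 1, the FLT_ROUNDS value for that mode.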
| 6103 | SDLoc dl(Op); |
| 6104 | SDValue Chain = Op.getOperand(0); |
| 6105 | SDValue Ops[] = {Chain, |
| 6106 | DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32)}; |
| 6107 | |
| 6108 | SDValue FPSCR = |
| 6109 | DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, {MVT::i32, MVT::Other}, Ops); |
| 6110 | Chain = FPSCR.getValue(1); |
| 6111 | SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, |
| 6112 | DAG.getConstant(1U << 22, dl, MVT::i32)); |
| 6113 | SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, |
| 6114 | DAG.getConstant(22, dl, MVT::i32)); |
| 6115 | SDValue And = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, |
| 6116 | DAG.getConstant(3, dl, MVT::i32)); |
| 6117 | return DAG.getMergeValues({And, Chain}, dl); |
| 6118 | } |
| 6119 | |
| 6120 | static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, |
| 6121 | const ARMSubtarget *ST) { |
| 6122 | SDLoc dl(N); |
| 6123 | EVT VT = N->getValueType(0); |
| 6124 | if (VT.isVector() && ST->hasNEON()) { |
| 6125 | |
| 6126 | // Compute the least significant set bit: LSB = X & -X |
| 6127 | SDValue X = N->getOperand(0); |
| 6128 | SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X); |
| 6129 | SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX); |
| 6130 | |
| 6131 | EVT ElemTy = VT.getVectorElementType(); |
| 6132 | |
| 6133 | if (ElemTy == MVT::i8) { |
| 6134 | // Compute with: cttz(x) = ctpop(lsb - 1) |
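      // e.g. x = 0b0100: lsb = 0b0100, lsb - 1 = 0b0011, ctpop = 2 = cttz(x).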
| 6135 | SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT, |
| 6136 | DAG.getTargetConstant(1, dl, ElemTy)); |
| 6137 | SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One); |
| 6138 | return DAG.getNode(ISD::CTPOP, dl, VT, Bits); |
| 6139 | } |
| 6140 | |
| 6141 | if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) && |
| 6142 | (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) { |
| 6143 | // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0 |
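      // e.g. for i16, x = 0b0100: lsb = 0b0100, ctlz = 13, and 15 - 13 = 2.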
| 6144 | unsigned NumBits = ElemTy.getSizeInBits(); |
| 6145 | SDValue WidthMinus1 = |
| 6146 | DAG.getNode(ARMISD::VMOVIMM, dl, VT, |
| 6147 | DAG.getTargetConstant(NumBits - 1, dl, ElemTy)); |
| 6148 | SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB); |
| 6149 | return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ); |
| 6150 | } |
| 6151 | |
| 6152 | // Compute with: cttz(x) = ctpop(lsb - 1) |
| 6153 | |
| 6154 | // Compute LSB - 1. |
| 6155 | SDValue Bits; |
| 6156 | if (ElemTy == MVT::i64) { |
| 6157 | // Load constant 0xffff'ffff'ffff'ffff to register. |
| 6158 | SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT, |
| 6159 | DAG.getTargetConstant(0x1eff, dl, MVT::i32)); |
| 6160 | Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF); |
| 6161 | } else { |
| 6162 | SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT, |
| 6163 | DAG.getTargetConstant(1, dl, ElemTy)); |
| 6164 | Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One); |
| 6165 | } |
| 6166 | return DAG.getNode(ISD::CTPOP, dl, VT, Bits); |
| 6167 | } |
| 6168 | |
| 6169 | if (!ST->hasV6T2Ops()) |
| 6170 | return SDValue(); |
| 6171 | |
| 6172 | SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0)); |
| 6173 | return DAG.getNode(ISD::CTLZ, dl, VT, rbit); |
| 6174 | } |
| 6175 | |
| 6176 | static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, |
| 6177 | const ARMSubtarget *ST) { |
| 6178 | EVT VT = N->getValueType(0); |
| 6179 | SDLoc DL(N); |
| 6180 | |
  assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
  assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
          VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
         "Unexpected type for custom ctpop lowering");
| 6185 | |
| 6186 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 6187 | EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; |
| 6188 | SDValue Res = DAG.getBitcast(VT8Bit, N->getOperand(0)); |
| 6189 | Res = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Res); |
| 6190 | |
| 6191 | // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds. |
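  // e.g. for VT = v4i32: v16i8 ctpop, then vpaddl.u8 to v8i16, then
  // vpaddl.u16 to v4i32.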
| 6192 | unsigned EltSize = 8; |
| 6193 | unsigned NumElts = VT.is64BitVector() ? 8 : 16; |
| 6194 | while (EltSize != VT.getScalarSizeInBits()) { |
| 6195 | SmallVector<SDValue, 8> Ops; |
| 6196 | Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddlu, DL, |
| 6197 | TLI.getPointerTy(DAG.getDataLayout()))); |
| 6198 | Ops.push_back(Res); |
| 6199 | |
| 6200 | EltSize *= 2; |
| 6201 | NumElts /= 2; |
| 6202 | MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts); |
| 6203 | Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, Ops); |
| 6204 | } |
| 6205 | |
| 6206 | return Res; |
| 6207 | } |
| 6208 | |
/// getVShiftImm - Check if this is a valid build_vector for the immediate
| 6210 | /// operand of a vector shift operation, where all the elements of the |
| 6211 | /// build_vector must have the same constant integer value. |
| 6212 | static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { |
| 6213 | // Ignore bit_converts. |
| 6214 | while (Op.getOpcode() == ISD::BITCAST) |
| 6215 | Op = Op.getOperand(0); |
| 6216 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); |
| 6217 | APInt SplatBits, SplatUndef; |
| 6218 | unsigned SplatBitSize; |
| 6219 | bool HasAnyUndefs; |
| 6220 | if (!BVN || |
| 6221 | !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, |
| 6222 | ElementBits) || |
| 6223 | SplatBitSize > ElementBits) |
| 6224 | return false; |
| 6225 | Cnt = SplatBits.getSExtValue(); |
| 6226 | return true; |
| 6227 | } |
| 6228 | |
| 6229 | /// isVShiftLImm - Check if this is a valid build_vector for the immediate |
| 6230 | /// operand of a vector shift left operation. That value must be in the range: |
| 6231 | /// 0 <= Value < ElementBits for a left shift; or |
| 6232 | /// 0 <= Value <= ElementBits for a long left shift. |
| 6233 | static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { |
| 6234 | assert(VT.isVector() && "vector shift count is not a vector type" ); |
| 6235 | int64_t ElementBits = VT.getScalarSizeInBits(); |
| 6236 | if (!getVShiftImm(Op, ElementBits, Cnt)) |
| 6237 | return false; |
| 6238 | return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits); |
| 6239 | } |
| 6240 | |
| 6241 | /// isVShiftRImm - Check if this is a valid build_vector for the immediate |
/// operand of a vector shift right operation. For a shift opcode, the value
/// is positive, but for an intrinsic the shift count must be negative. The
| 6244 | /// absolute value must be in the range: |
| 6245 | /// 1 <= |Value| <= ElementBits for a right shift; or |
| 6246 | /// 1 <= |Value| <= ElementBits/2 for a narrow right shift. |
| 6247 | static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, |
| 6248 | int64_t &Cnt) { |
| 6249 | assert(VT.isVector() && "vector shift count is not a vector type" ); |
| 6250 | int64_t ElementBits = VT.getScalarSizeInBits(); |
| 6251 | if (!getVShiftImm(Op, ElementBits, Cnt)) |
| 6252 | return false; |
| 6253 | if (!isIntrinsic) |
| 6254 | return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits)); |
| 6255 | if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) { |
| 6256 | Cnt = -Cnt; |
| 6257 | return true; |
| 6258 | } |
| 6259 | return false; |
| 6260 | } |
| 6261 | |
| 6262 | static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, |
| 6263 | const ARMSubtarget *ST) { |
| 6264 | EVT VT = N->getValueType(0); |
| 6265 | SDLoc dl(N); |
| 6266 | int64_t Cnt; |
| 6267 | |
| 6268 | if (!VT.isVector()) |
| 6269 | return SDValue(); |
| 6270 | |
| 6271 | // We essentially have two forms here. Shift by an immediate and shift by a |
| 6272 | // vector register (there are also shift by a gpr, but that is just handled |
| 6273 | // with a tablegen pattern). We cannot easily match shift by an immediate in |
| 6274 | // tablegen so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM. |
| 6275 | // For shifting by a vector, we don't have VSHR, only VSHL (which can be |
| 6276 | // signed or unsigned, and a negative shift indicates a shift right). |
| 6277 | if (N->getOpcode() == ISD::SHL) { |
| 6278 | if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) |
| 6279 | return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0), |
| 6280 | DAG.getConstant(Cnt, dl, MVT::i32)); |
| 6281 | return DAG.getNode(ARMISD::VSHLu, dl, VT, N->getOperand(0), |
| 6282 | N->getOperand(1)); |
| 6283 | } |
| 6284 | |
| 6285 | assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) && |
| 6286 | "unexpected vector shift opcode" ); |
| 6287 | |
| 6288 | if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { |
| 6289 | unsigned VShiftOpc = |
| 6290 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); |
| 6291 | return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), |
| 6292 | DAG.getConstant(Cnt, dl, MVT::i32)); |
| 6293 | } |
| 6294 | |
  // We have no instructions for the remaining right shifts by a vector, so
  // emulate them with a shift left by a negated shift amount.
| 6297 | EVT ShiftVT = N->getOperand(1).getValueType(); |
| 6298 | SDValue NegatedCount = DAG.getNode( |
| 6299 | ISD::SUB, dl, ShiftVT, getZeroVector(ShiftVT, DAG, dl), N->getOperand(1)); |
| 6300 | unsigned VShiftOpc = |
| 6301 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu); |
| 6302 | return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), NegatedCount); |
| 6303 | } |
| 6304 | |
| 6305 | static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, |
| 6306 | const ARMSubtarget *ST) { |
| 6307 | EVT VT = N->getValueType(0); |
| 6308 | SDLoc dl(N); |
| 6309 | |
| 6310 | // We can get here for a node like i32 = ISD::SHL i32, i64 |
| 6311 | if (VT != MVT::i64) |
| 6312 | return SDValue(); |
| 6313 | |
| 6314 | assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA || |
| 6315 | N->getOpcode() == ISD::SHL) && |
| 6316 | "Unknown shift to lower!" ); |
| 6317 | |
| 6318 | unsigned ShOpc = N->getOpcode(); |
| 6319 | if (ST->hasMVEIntegerOps()) { |
| 6320 | SDValue ShAmt = N->getOperand(1); |
| 6321 | unsigned ShPartsOpc = ARMISD::LSLL; |
| 6322 | ConstantSDNode *Con = dyn_cast<ConstantSDNode>(ShAmt); |
| 6323 | |
    // If the shift amount is constant zero, at least 32, or wider than 64
    // bits, fall back to the default expansion.
| 6326 | if (ShAmt->getValueType(0).getSizeInBits() > 64 || |
| 6327 | (Con && (Con->getZExtValue() == 0 || Con->getZExtValue() >= 32))) |
| 6328 | return SDValue(); |
| 6329 | |
| 6330 | // Extract the lower 32 bits of the shift amount if it's not an i32 |
| 6331 | if (ShAmt->getValueType(0) != MVT::i32) |
| 6332 | ShAmt = DAG.getZExtOrTrunc(ShAmt, dl, MVT::i32); |
| 6333 | |
| 6334 | if (ShOpc == ISD::SRL) { |
| 6335 | if (!Con) |
| 6336 | // There is no t2LSRLr instruction so negate and perform an lsll if the |
| 6337 | // shift amount is in a register, emulating a right shift. |
| 6338 | ShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, |
| 6339 | DAG.getConstant(0, dl, MVT::i32), ShAmt); |
| 6340 | else |
| 6341 | // Else generate an lsrl on the immediate shift amount |
| 6342 | ShPartsOpc = ARMISD::LSRL; |
| 6343 | } else if (ShOpc == ISD::SRA) |
| 6344 | ShPartsOpc = ARMISD::ASRL; |
| 6345 | |
| 6346 | // Lower 32 bits of the destination/source |
| 6347 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), |
| 6348 | DAG.getConstant(0, dl, MVT::i32)); |
| 6349 | // Upper 32 bits of the destination/source |
| 6350 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), |
| 6351 | DAG.getConstant(1, dl, MVT::i32)); |
| 6352 | |
| 6353 | // Generate the shift operation as computed above |
| 6354 | Lo = DAG.getNode(ShPartsOpc, dl, DAG.getVTList(MVT::i32, MVT::i32), Lo, Hi, |
| 6355 | ShAmt); |
| 6356 | // The upper 32 bits come from the second return value of lsll |
| 6357 | Hi = SDValue(Lo.getNode(), 1); |
| 6358 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); |
| 6359 | } |
| 6360 | |
| 6361 | // We only lower SRA, SRL of 1 here, all others use generic lowering. |
| 6362 | if (!isOneConstant(N->getOperand(1)) || N->getOpcode() == ISD::SHL) |
| 6363 | return SDValue(); |
| 6364 | |
| 6365 | // If we are in thumb mode, we don't have RRX. |
| 6366 | if (ST->isThumb1Only()) |
| 6367 | return SDValue(); |
| 6368 | |
| 6369 | // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. |
| 6370 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), |
| 6371 | DAG.getConstant(0, dl, MVT::i32)); |
| 6372 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), |
| 6373 | DAG.getConstant(1, dl, MVT::i32)); |
| 6374 | |
| 6375 | // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and |
| 6376 | // captures the result into a carry flag. |
| 6377 | unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; |
| 6378 | Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi); |
| 6379 | |
| 6380 | // The low part is an ARMISD::RRX operand, which shifts the carry in. |
| 6381 | Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); |
| 6382 | |
| 6383 | // Merge the pieces into a single i64 value. |
| 6384 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); |
| 6385 | } |
| 6386 | |
| 6387 | static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, |
| 6388 | const ARMSubtarget *ST) { |
| 6389 | bool Invert = false; |
| 6390 | bool Swap = false; |
| 6391 | unsigned Opc = ARMCC::AL; |
| 6392 | |
| 6393 | SDValue Op0 = Op.getOperand(0); |
| 6394 | SDValue Op1 = Op.getOperand(1); |
| 6395 | SDValue CC = Op.getOperand(2); |
| 6396 | EVT VT = Op.getValueType(); |
| 6397 | ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); |
| 6398 | SDLoc dl(Op); |
| 6399 | |
| 6400 | EVT CmpVT; |
| 6401 | if (ST->hasNEON()) |
| 6402 | CmpVT = Op0.getValueType().changeVectorElementTypeToInteger(); |
| 6403 | else { |
| 6404 | assert(ST->hasMVEIntegerOps() && |
| 6405 | "No hardware support for integer vector comparison!" ); |
| 6406 | |
| 6407 | if (Op.getValueType().getVectorElementType() != MVT::i1) |
| 6408 | return SDValue(); |
| 6409 | |
| 6410 | // Make sure we expand floating point setcc to scalar if we do not have |
| 6411 | // mve.fp, so that we can handle them from there. |
| 6412 | if (Op0.getValueType().isFloatingPoint() && !ST->hasMVEFloatOps()) |
| 6413 | return SDValue(); |
| 6414 | |
| 6415 | CmpVT = VT; |
| 6416 | } |
| 6417 | |
| 6418 | if (Op0.getValueType().getVectorElementType() == MVT::i64 && |
| 6419 | (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) { |
| 6420 | // Special-case integer 64-bit equality comparisons. They aren't legal, |
| 6421 | // but they can be lowered with a few vector instructions. |
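    // Bitcast the operands to v(2N)i32 and compare the 32-bit halves, then
    // AND each half of the result with its partner half (brought alongside
    // with VREV64), so a 64-bit element ends up all-ones only if both of its
    // 32-bit halves compared equal.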
| 6422 | unsigned CmpElements = CmpVT.getVectorNumElements() * 2; |
| 6423 | EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements); |
| 6424 | SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0); |
| 6425 | SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1); |
| 6426 | SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1, |
| 6427 | DAG.getCondCode(ISD::SETEQ)); |
| 6428 | SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp); |
| 6429 | SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed); |
| 6430 | Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged); |
| 6431 | if (SetCCOpcode == ISD::SETNE) |
| 6432 | Merged = DAG.getNOT(dl, Merged, CmpVT); |
| 6433 | Merged = DAG.getSExtOrTrunc(Merged, dl, VT); |
| 6434 | return Merged; |
| 6435 | } |
| 6436 | |
| 6437 | if (CmpVT.getVectorElementType() == MVT::i64) |
| 6438 | // 64-bit comparisons are not legal in general. |
| 6439 | return SDValue(); |
| 6440 | |
| 6441 | if (Op1.getValueType().isFloatingPoint()) { |
| 6442 | switch (SetCCOpcode) { |
| 6443 | default: llvm_unreachable("Illegal FP comparison" ); |
| 6444 | case ISD::SETUNE: |
| 6445 | case ISD::SETNE: |
| 6446 | if (ST->hasMVEFloatOps()) { |
| 6447 | Opc = ARMCC::NE; break; |
| 6448 | } else { |
| 6449 | Invert = true; LLVM_FALLTHROUGH; |
| 6450 | } |
| 6451 | case ISD::SETOEQ: |
| 6452 | case ISD::SETEQ: Opc = ARMCC::EQ; break; |
| 6453 | case ISD::SETOLT: |
| 6454 | case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH; |
| 6455 | case ISD::SETOGT: |
| 6456 | case ISD::SETGT: Opc = ARMCC::GT; break; |
| 6457 | case ISD::SETOLE: |
| 6458 | case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH; |
| 6459 | case ISD::SETOGE: |
| 6460 | case ISD::SETGE: Opc = ARMCC::GE; break; |
| 6461 | case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH; |
| 6462 | case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break; |
| 6463 | case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH; |
| 6464 | case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break; |
| 6465 | case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH; |
| 6466 | case ISD::SETONE: { |
| 6467 | // Expand this to (OLT | OGT). |
| 6468 | SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0, |
| 6469 | DAG.getConstant(ARMCC::GT, dl, MVT::i32)); |
| 6470 | SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1, |
| 6471 | DAG.getConstant(ARMCC::GT, dl, MVT::i32)); |
| 6472 | SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1); |
| 6473 | if (Invert) |
| 6474 | Result = DAG.getNOT(dl, Result, VT); |
| 6475 | return Result; |
| 6476 | } |
| 6477 | case ISD::SETUO: Invert = true; LLVM_FALLTHROUGH; |
| 6478 | case ISD::SETO: { |
| 6479 | // Expand this to (OLT | OGE). |
| 6480 | SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0, |
| 6481 | DAG.getConstant(ARMCC::GT, dl, MVT::i32)); |
| 6482 | SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1, |
| 6483 | DAG.getConstant(ARMCC::GE, dl, MVT::i32)); |
| 6484 | SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1); |
| 6485 | if (Invert) |
| 6486 | Result = DAG.getNOT(dl, Result, VT); |
| 6487 | return Result; |
| 6488 | } |
| 6489 | } |
| 6490 | } else { |
| 6491 | // Integer comparisons. |
| 6492 | switch (SetCCOpcode) { |
| 6493 | default: llvm_unreachable("Illegal integer comparison" ); |
| 6494 | case ISD::SETNE: |
| 6495 | if (ST->hasMVEIntegerOps()) { |
| 6496 | Opc = ARMCC::NE; break; |
| 6497 | } else { |
| 6498 | Invert = true; LLVM_FALLTHROUGH; |
| 6499 | } |
| 6500 | case ISD::SETEQ: Opc = ARMCC::EQ; break; |
| 6501 | case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH; |
| 6502 | case ISD::SETGT: Opc = ARMCC::GT; break; |
| 6503 | case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH; |
| 6504 | case ISD::SETGE: Opc = ARMCC::GE; break; |
| 6505 | case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH; |
| 6506 | case ISD::SETUGT: Opc = ARMCC::HI; break; |
| 6507 | case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH; |
| 6508 | case ISD::SETUGE: Opc = ARMCC::HS; break; |
| 6509 | } |
| 6510 | |
| 6511 | // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). |
| 6512 | if (ST->hasNEON() && Opc == ARMCC::EQ) { |
| 6513 | SDValue AndOp; |
| 6514 | if (ISD::isBuildVectorAllZeros(Op1.getNode())) |
| 6515 | AndOp = Op0; |
| 6516 | else if (ISD::isBuildVectorAllZeros(Op0.getNode())) |
| 6517 | AndOp = Op1; |
| 6518 | |
| 6519 | // Ignore bitconvert. |
| 6520 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) |
| 6521 | AndOp = AndOp.getOperand(0); |
| 6522 | |
| 6523 | if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { |
| 6524 | Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0)); |
| 6525 | Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1)); |
| 6526 | SDValue Result = DAG.getNode(ARMISD::VTST, dl, CmpVT, Op0, Op1); |
| 6527 | if (!Invert) |
| 6528 | Result = DAG.getNOT(dl, Result, VT); |
| 6529 | return Result; |
| 6530 | } |
| 6531 | } |
| 6532 | } |
| 6533 | |
| 6534 | if (Swap) |
| 6535 | std::swap(Op0, Op1); |
| 6536 | |
| 6537 | // If one of the operands is a constant vector zero, attempt to fold the |
| 6538 | // comparison to a specialized compare-against-zero form. |
| 6539 | SDValue SingleOp; |
| 6540 | if (ISD::isBuildVectorAllZeros(Op1.getNode())) |
| 6541 | SingleOp = Op0; |
| 6542 | else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { |
| 6543 | if (Opc == ARMCC::GE) |
| 6544 | Opc = ARMCC::LE; |
| 6545 | else if (Opc == ARMCC::GT) |
| 6546 | Opc = ARMCC::LT; |
| 6547 | SingleOp = Op1; |
| 6548 | } |
| 6549 | |
| 6550 | SDValue Result; |
| 6551 | if (SingleOp.getNode()) { |
| 6552 | Result = DAG.getNode(ARMISD::VCMPZ, dl, CmpVT, SingleOp, |
| 6553 | DAG.getConstant(Opc, dl, MVT::i32)); |
| 6554 | } else { |
| 6555 | Result = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1, |
| 6556 | DAG.getConstant(Opc, dl, MVT::i32)); |
| 6557 | } |
| 6558 | |
| 6559 | Result = DAG.getSExtOrTrunc(Result, dl, VT); |
| 6560 | |
| 6561 | if (Invert) |
| 6562 | Result = DAG.getNOT(dl, Result, VT); |
| 6563 | |
| 6564 | return Result; |
| 6565 | } |
| 6566 | |
| 6567 | static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) { |
| 6568 | SDValue LHS = Op.getOperand(0); |
| 6569 | SDValue RHS = Op.getOperand(1); |
| 6570 | SDValue Carry = Op.getOperand(2); |
| 6571 | SDValue Cond = Op.getOperand(3); |
| 6572 | SDLoc DL(Op); |
| 6573 | |
  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
| 6575 | |
| 6576 | // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we |
| 6577 | // have to invert the carry first. |
| 6578 | Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, |
| 6579 | DAG.getConstant(1, DL, MVT::i32), Carry); |
| 6580 | // This converts the boolean value carry into the carry flag. |
| 6581 | Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); |
| 6582 | |
| 6583 | SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); |
| 6584 | SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry); |
| 6585 | |
| 6586 | SDValue FVal = DAG.getConstant(0, DL, MVT::i32); |
| 6587 | SDValue TVal = DAG.getConstant(1, DL, MVT::i32); |
| 6588 | SDValue ARMcc = DAG.getConstant( |
| 6589 | IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32); |
| 6590 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 6591 | SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR, |
| 6592 | Cmp.getValue(1), SDValue()); |
| 6593 | return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc, |
| 6594 | CCR, Chain.getValue(1)); |
| 6595 | } |
| 6596 | |
| 6597 | /// isVMOVModifiedImm - Check if the specified splat value corresponds to a |
| 6598 | /// valid vector constant for a NEON or MVE instruction with a "modified |
| 6599 | /// immediate" operand (e.g., VMOV). If so, return the encoded value. |
| 6600 | static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, |
| 6601 | unsigned SplatBitSize, SelectionDAG &DAG, |
| 6602 | const SDLoc &dl, EVT &VT, EVT VectorVT, |
| 6603 | VMOVModImmType type) { |
| 6604 | unsigned OpCmode, Imm; |
| 6605 | bool is128Bits = VectorVT.is128BitVector(); |
| 6606 | |
| 6607 | // SplatBitSize is set to the smallest size that splats the vector, so a |
| 6608 | // zero vector will always have SplatBitSize == 8. However, NEON modified |
  // immediate instructions other than VMOV do not support the 8-bit encoding
| 6610 | // of a zero vector, and the default encoding of zero is supposed to be the |
| 6611 | // 32-bit version. |
| 6612 | if (SplatBits == 0) |
| 6613 | SplatBitSize = 32; |
| 6614 | |
| 6615 | switch (SplatBitSize) { |
| 6616 | case 8: |
| 6617 | if (type != VMOVModImm) |
| 6618 | return SDValue(); |
| 6619 | // Any 1-byte value is OK. Op=0, Cmode=1110. |
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
| 6621 | OpCmode = 0xe; |
| 6622 | Imm = SplatBits; |
| 6623 | VT = is128Bits ? MVT::v16i8 : MVT::v8i8; |
| 6624 | break; |
| 6625 | |
| 6626 | case 16: |
| 6627 | // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. |
| 6628 | VT = is128Bits ? MVT::v8i16 : MVT::v4i16; |
| 6629 | if ((SplatBits & ~0xff) == 0) { |
| 6630 | // Value = 0x00nn: Op=x, Cmode=100x. |
| 6631 | OpCmode = 0x8; |
| 6632 | Imm = SplatBits; |
| 6633 | break; |
| 6634 | } |
| 6635 | if ((SplatBits & ~0xff00) == 0) { |
| 6636 | // Value = 0xnn00: Op=x, Cmode=101x. |
| 6637 | OpCmode = 0xa; |
| 6638 | Imm = SplatBits >> 8; |
| 6639 | break; |
| 6640 | } |
| 6641 | return SDValue(); |
| 6642 | |
| 6643 | case 32: |
| 6644 | // NEON's 32-bit VMOV supports splat values where: |
| 6645 | // * only one byte is nonzero, or |
| 6646 | // * the least significant byte is 0xff and the second byte is nonzero, or |
| 6647 | // * the least significant 2 bytes are 0xff and the third is nonzero. |
| 6648 | VT = is128Bits ? MVT::v4i32 : MVT::v2i32; |
| 6649 | if ((SplatBits & ~0xff) == 0) { |
| 6650 | // Value = 0x000000nn: Op=x, Cmode=000x. |
| 6651 | OpCmode = 0; |
| 6652 | Imm = SplatBits; |
| 6653 | break; |
| 6654 | } |
| 6655 | if ((SplatBits & ~0xff00) == 0) { |
| 6656 | // Value = 0x0000nn00: Op=x, Cmode=001x. |
| 6657 | OpCmode = 0x2; |
| 6658 | Imm = SplatBits >> 8; |
| 6659 | break; |
| 6660 | } |
| 6661 | if ((SplatBits & ~0xff0000) == 0) { |
| 6662 | // Value = 0x00nn0000: Op=x, Cmode=010x. |
| 6663 | OpCmode = 0x4; |
| 6664 | Imm = SplatBits >> 16; |
| 6665 | break; |
| 6666 | } |
| 6667 | if ((SplatBits & ~0xff000000) == 0) { |
| 6668 | // Value = 0xnn000000: Op=x, Cmode=011x. |
| 6669 | OpCmode = 0x6; |
| 6670 | Imm = SplatBits >> 24; |
| 6671 | break; |
| 6672 | } |
| 6673 | |
| 6674 | // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC |
| 6675 | if (type == OtherModImm) return SDValue(); |
| 6676 | |
| 6677 | if ((SplatBits & ~0xffff) == 0 && |
| 6678 | ((SplatBits | SplatUndef) & 0xff) == 0xff) { |
| 6679 | // Value = 0x0000nnff: Op=x, Cmode=1100. |
| 6680 | OpCmode = 0xc; |
| 6681 | Imm = SplatBits >> 8; |
| 6682 | break; |
| 6683 | } |
| 6684 | |
| 6685 | // cmode == 0b1101 is not supported for MVE VMVN |
| 6686 | if (type == MVEVMVNModImm) |
| 6687 | return SDValue(); |
| 6688 | |
| 6689 | if ((SplatBits & ~0xffffff) == 0 && |
| 6690 | ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { |
| 6691 | // Value = 0x00nnffff: Op=x, Cmode=1101. |
| 6692 | OpCmode = 0xd; |
| 6693 | Imm = SplatBits >> 16; |
| 6694 | break; |
| 6695 | } |
| 6696 | |
| 6697 | // Note: there are a few 32-bit splat values (specifically: 00ffff00, |
| 6698 | // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not |
| 6699 | // VMOV.I32. A (very) minor optimization would be to replicate the value |
| 6700 | // and fall through here to test for a valid 64-bit splat. But, then the |
| 6701 | // caller would also need to check and handle the change in size. |
| 6702 | return SDValue(); |
| 6703 | |
| 6704 | case 64: { |
| 6705 | if (type != VMOVModImm) |
| 6706 | return SDValue(); |
| 6707 | // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. |
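    // e.g. the splat value 0x00ff00ff00ff00ff is encoded as Imm = 0b01010101,
    // one immediate bit per byte, least significant byte first.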
| 6708 | uint64_t BitMask = 0xff; |
| 6709 | uint64_t Val = 0; |
| 6710 | unsigned ImmMask = 1; |
| 6711 | Imm = 0; |
| 6712 | for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { |
| 6713 | if (((SplatBits | SplatUndef) & BitMask) == BitMask) { |
| 6714 | Val |= BitMask; |
| 6715 | Imm |= ImmMask; |
| 6716 | } else if ((SplatBits & BitMask) != 0) { |
| 6717 | return SDValue(); |
| 6718 | } |
| 6719 | BitMask <<= 8; |
| 6720 | ImmMask <<= 1; |
| 6721 | } |
| 6722 | |
| 6723 | if (DAG.getDataLayout().isBigEndian()) { |
| 6724 | // Reverse the order of elements within the vector. |
| 6725 | unsigned BytesPerElem = VectorVT.getScalarSizeInBits() / 8; |
| 6726 | unsigned Mask = (1 << BytesPerElem) - 1; |
| 6727 | unsigned NumElems = 8 / BytesPerElem; |
| 6728 | unsigned NewImm = 0; |
| 6729 | for (unsigned ElemNum = 0; ElemNum < NumElems; ++ElemNum) { |
| 6730 | unsigned Elem = ((Imm >> ElemNum * BytesPerElem) & Mask); |
| 6731 | NewImm |= Elem << (NumElems - ElemNum - 1) * BytesPerElem; |
| 6732 | } |
| 6733 | Imm = NewImm; |
| 6734 | } |
| 6735 | |
| 6736 | // Op=1, Cmode=1110. |
| 6737 | OpCmode = 0x1e; |
| 6738 | VT = is128Bits ? MVT::v2i64 : MVT::v1i64; |
| 6739 | break; |
| 6740 | } |
| 6741 | |
| 6742 | default: |
| 6743 | llvm_unreachable("unexpected size for isVMOVModifiedImm" ); |
| 6744 | } |
| 6745 | |
| 6746 | unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode, Imm); |
| 6747 | return DAG.getTargetConstant(EncodedVal, dl, MVT::i32); |
| 6748 | } |
| 6749 | |
| 6750 | SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, |
| 6751 | const ARMSubtarget *ST) const { |
| 6752 | EVT VT = Op.getValueType(); |
| 6753 | bool IsDouble = (VT == MVT::f64); |
| 6754 | ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); |
| 6755 | const APFloat &FPVal = CFP->getValueAPF(); |
| 6756 | |
| 6757 | // Prevent floating-point constants from using literal loads |
| 6758 | // when execute-only is enabled. |
| 6759 | if (ST->genExecuteOnly()) { |
| 6760 | // If we can represent the constant as an immediate, don't lower it |
| 6761 | if (isFPImmLegal(FPVal, VT)) |
| 6762 | return Op; |
| 6763 | // Otherwise, construct as integer, and move to float register |
| 6764 | APInt INTVal = FPVal.bitcastToAPInt(); |
| 6765 | SDLoc DL(CFP); |
| 6766 | switch (VT.getSimpleVT().SimpleTy) { |
| 6767 | default: |
| 6768 | llvm_unreachable("Unknown floating point type!" ); |
| 6769 | break; |
| 6770 | case MVT::f64: { |
| 6771 | SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32); |
| 6772 | SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32); |
| 6773 | return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi); |
| 6774 | } |
| 6775 | case MVT::f32: |
| 6776 | return DAG.getNode(ARMISD::VMOVSR, DL, VT, |
| 6777 | DAG.getConstant(INTVal, DL, MVT::i32)); |
| 6778 | } |
| 6779 | } |
| 6780 | |
| 6781 | if (!ST->hasVFP3Base()) |
| 6782 | return SDValue(); |
| 6783 | |
| 6784 | // Use the default (constant pool) lowering for double constants when we have |
| 6785 | // an SP-only FPU |
| 6786 | if (IsDouble && !Subtarget->hasFP64()) |
| 6787 | return SDValue(); |
| 6788 | |
| 6789 | // Try splatting with a VMOV.f32... |
| 6790 | int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal); |
| 6791 | |
| 6792 | if (ImmVal != -1) { |
| 6793 | if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { |
| 6794 | // We have code in place to select a valid ConstantFP already, no need to |
| 6795 | // do any mangling. |
| 6796 | return Op; |
| 6797 | } |
| 6798 | |
| 6799 | // It's a float and we are trying to use NEON operations where |
| 6800 | // possible. Lower it to a splat followed by an extract. |
| 6801 | SDLoc DL(Op); |
| 6802 | SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32); |
| 6803 | SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, |
| 6804 | NewVal); |
| 6805 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, |
| 6806 | DAG.getConstant(0, DL, MVT::i32)); |
| 6807 | } |
| 6808 | |
  // The rest of our options are NEON only, make sure that's allowed before
  // proceeding.
| 6811 | if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) |
| 6812 | return SDValue(); |
| 6813 | |
| 6814 | EVT VMovVT; |
| 6815 | uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); |
| 6816 | |
| 6817 | // It wouldn't really be worth bothering for doubles except for one very |
| 6818 | // important value, which does happen to match: 0.0. So make sure we don't do |
| 6819 | // anything stupid. |
| 6820 | if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) |
| 6821 | return SDValue(); |
| 6822 | |
| 6823 | // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). |
| 6824 | SDValue NewVal = isVMOVModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), |
| 6825 | VMovVT, VT, VMOVModImm); |
| 6826 | if (NewVal != SDValue()) { |
| 6827 | SDLoc DL(Op); |
| 6828 | SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, |
| 6829 | NewVal); |
| 6830 | if (IsDouble) |
| 6831 | return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); |
| 6832 | |
| 6833 | // It's a float: cast and extract a vector element. |
| 6834 | SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, |
| 6835 | VecConstant); |
| 6836 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, |
| 6837 | DAG.getConstant(0, DL, MVT::i32)); |
| 6838 | } |
| 6839 | |
| 6840 | // Finally, try a VMVN.i32 |
| 6841 | NewVal = isVMOVModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT, |
| 6842 | VT, VMVNModImm); |
| 6843 | if (NewVal != SDValue()) { |
| 6844 | SDLoc DL(Op); |
| 6845 | SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal); |
| 6846 | |
| 6847 | if (IsDouble) |
| 6848 | return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); |
| 6849 | |
| 6850 | // It's a float: cast and extract a vector element. |
| 6851 | SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, |
| 6852 | VecConstant); |
| 6853 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, |
| 6854 | DAG.getConstant(0, DL, MVT::i32)); |
| 6855 | } |
| 6856 | |
| 6857 | return SDValue(); |
| 6858 | } |
| 6859 | |
// Check whether a VEXT instruction can handle the shuffle mask when the
// vector sources of the shuffle are the same.
| 6862 | static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { |
| 6863 | unsigned NumElts = VT.getVectorNumElements(); |
| 6864 | |
| 6865 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
| 6866 | if (M[0] < 0) |
| 6867 | return false; |
| 6868 | |
| 6869 | Imm = M[0]; |
| 6870 | |
| 6871 | // If this is a VEXT shuffle, the immediate value is the index of the first |
| 6872 | // element. The other shuffle indices must be the successive elements after |
| 6873 | // the first one. |
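  // e.g. for v8i8 with Imm = 3, the expected mask is <3, 4, 5, 6, 7, 0, 1, 2>,
  // wrapping back to element 0 once the end of the vector is reached.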
| 6874 | unsigned ExpectedElt = Imm; |
| 6875 | for (unsigned i = 1; i < NumElts; ++i) { |
| 6876 | // Increment the expected index. If it wraps around, just follow it |
| 6877 | // back to index zero and keep going. |
| 6878 | ++ExpectedElt; |
| 6879 | if (ExpectedElt == NumElts) |
| 6880 | ExpectedElt = 0; |
| 6881 | |
| 6882 | if (M[i] < 0) continue; // ignore UNDEF indices |
| 6883 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
| 6884 | return false; |
| 6885 | } |
| 6886 | |
| 6887 | return true; |
| 6888 | } |
| 6889 | |
| 6890 | static bool isVEXTMask(ArrayRef<int> M, EVT VT, |
| 6891 | bool &ReverseVEXT, unsigned &Imm) { |
| 6892 | unsigned NumElts = VT.getVectorNumElements(); |
| 6893 | ReverseVEXT = false; |
| 6894 | |
| 6895 | // Assume that the first shuffle index is not UNDEF. Fail if it is. |
| 6896 | if (M[0] < 0) |
| 6897 | return false; |
| 6898 | |
| 6899 | Imm = M[0]; |
| 6900 | |
| 6901 | // If this is a VEXT shuffle, the immediate value is the index of the first |
| 6902 | // element. The other shuffle indices must be the successive elements after |
| 6903 | // the first one. |
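  // e.g. for v8i8, the mask <3, 4, 5, 6, 7, 8, 9, 10> is a VEXT with Imm = 3,
  // taking the last five elements of v1 followed by the first three of v2.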
| 6904 | unsigned ExpectedElt = Imm; |
| 6905 | for (unsigned i = 1; i < NumElts; ++i) { |
| 6906 | // Increment the expected index. If it wraps around, it may still be |
| 6907 | // a VEXT but the source vectors must be swapped. |
| 6908 | ExpectedElt += 1; |
| 6909 | if (ExpectedElt == NumElts * 2) { |
| 6910 | ExpectedElt = 0; |
| 6911 | ReverseVEXT = true; |
| 6912 | } |
| 6913 | |
| 6914 | if (M[i] < 0) continue; // ignore UNDEF indices |
| 6915 | if (ExpectedElt != static_cast<unsigned>(M[i])) |
| 6916 | return false; |
| 6917 | } |
| 6918 | |
| 6919 | // Adjust the index value if the source operands will be swapped. |
| 6920 | if (ReverseVEXT) |
| 6921 | Imm -= NumElts; |
| 6922 | |
| 6923 | return true; |
| 6924 | } |
| 6925 | |
| 6926 | /// isVREVMask - Check if a vector shuffle corresponds to a VREV |
| 6927 | /// instruction with the specified blocksize. (The order of the elements |
| 6928 | /// within each block of the vector is reversed.) |
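/// e.g. a VREV64.32 shuffle of v4i32 corresponds to the mask <1, 0, 3, 2>.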
| 6929 | static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { |
| 6930 | assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && |
| 6931 | "Only possible block sizes for VREV are: 16, 32, 64" ); |
| 6932 | |
| 6933 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 6934 | if (EltSz == 64) |
| 6935 | return false; |
| 6936 | |
| 6937 | unsigned NumElts = VT.getVectorNumElements(); |
| 6938 | unsigned BlockElts = M[0] + 1; |
| 6939 | // If the first shuffle index is UNDEF, be optimistic. |
| 6940 | if (M[0] < 0) |
| 6941 | BlockElts = BlockSize / EltSz; |
| 6942 | |
| 6943 | if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) |
| 6944 | return false; |
| 6945 | |
| 6946 | for (unsigned i = 0; i < NumElts; ++i) { |
| 6947 | if (M[i] < 0) continue; // ignore UNDEF indices |
| 6948 | if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) |
| 6949 | return false; |
| 6950 | } |
| 6951 | |
| 6952 | return true; |
| 6953 | } |
| 6954 | |
| 6955 | static bool isVTBLMask(ArrayRef<int> M, EVT VT) { |
| 6956 | // We can handle <8 x i8> vector shuffles. If the index in the mask is out of |
| 6957 | // range, then 0 is placed into the resulting vector. So pretty much any mask |
| 6958 | // of 8 elements can work here. |
| 6959 | return VT == MVT::v8i8 && M.size() == 8; |
| 6960 | } |
| 6961 | |
| 6962 | static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask, |
| 6963 | unsigned Index) { |
| 6964 | if (Mask.size() == Elements * 2) |
| 6965 | return Index / Elements; |
| 6966 | return Mask[Index] == 0 ? 0 : 1; |
| 6967 | } |
| 6968 | |
| 6969 | // Checks whether the shuffle mask represents a vector transpose (VTRN) by |
| 6970 | // checking that pairs of elements in the shuffle mask represent the same index |
| 6971 | // in each vector, incrementing the expected index by 2 at each step. |
| 6972 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6] |
| 6973 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g} |
| 6974 | // v2={e,f,g,h} |
| 6975 | // WhichResult gives the offset for each element in the mask based on which |
| 6976 | // of the two results it belongs to. |
| 6977 | // |
| 6978 | // The transpose can be represented either as: |
| 6979 | // result1 = shufflevector v1, v2, result1_shuffle_mask |
| 6980 | // result2 = shufflevector v1, v2, result2_shuffle_mask |
| 6981 | // where v1/v2 and the shuffle masks have the same number of elements |
| 6982 | // (here WhichResult (see below) indicates which result is being checked) |
| 6983 | // |
| 6984 | // or as: |
| 6985 | // results = shufflevector v1, v2, shuffle_mask |
| 6986 | // where both results are returned in one vector and the shuffle mask has twice |
| 6987 | // as many elements as v1/v2 (here WhichResult will always be 0 if true) here we |
| 6988 | // want to check the low half and high half of the shuffle mask as if it were |
| 6989 | // the other case |
| 6990 | static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 6991 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 6992 | if (EltSz == 64) |
| 6993 | return false; |
| 6994 | |
| 6995 | unsigned NumElts = VT.getVectorNumElements(); |
| 6996 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 6997 | return false; |
| 6998 | |
| 6999 | // If the mask is twice as long as the input vector then we need to check the |
| 7000 | // upper and lower parts of the mask with a matching value for WhichResult |
| 7001 | // FIXME: A mask with only even values will be rejected in case the first |
| 7002 | // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only |
| 7003 | // M[0] is used to determine WhichResult |
| 7004 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7005 | WhichResult = SelectPairHalf(NumElts, M, i); |
| 7006 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7007 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
| 7008 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult)) |
| 7009 | return false; |
| 7010 | } |
| 7011 | } |
| 7012 | |
| 7013 | if (M.size() == NumElts*2) |
| 7014 | WhichResult = 0; |
| 7015 | |
| 7016 | return true; |
| 7017 | } |
| 7018 | |
| 7019 | /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of |
| 7020 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 7021 | /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. |
| 7022 | static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 7023 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7024 | if (EltSz == 64) |
| 7025 | return false; |
| 7026 | |
| 7027 | unsigned NumElts = VT.getVectorNumElements(); |
| 7028 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7029 | return false; |
| 7030 | |
| 7031 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7032 | WhichResult = SelectPairHalf(NumElts, M, i); |
| 7033 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7034 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || |
| 7035 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult)) |
| 7036 | return false; |
| 7037 | } |
| 7038 | } |
| 7039 | |
| 7040 | if (M.size() == NumElts*2) |
| 7041 | WhichResult = 0; |
| 7042 | |
| 7043 | return true; |
| 7044 | } |
| 7045 | |
| 7046 | // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking |
| 7047 | // that the mask elements are either all even and in steps of size 2 or all odd |
| 7048 | // and in steps of size 2. |
| 7049 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6] |
| 7050 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g} |
| 7051 | // v2={e,f,g,h} |
// Requires checks similar to those of isVTRNMask with respect to how the
// results are returned.
| 7054 | static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 7055 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7056 | if (EltSz == 64) |
| 7057 | return false; |
| 7058 | |
| 7059 | unsigned NumElts = VT.getVectorNumElements(); |
| 7060 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7061 | return false; |
| 7062 | |
| 7063 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7064 | WhichResult = SelectPairHalf(NumElts, M, i); |
| 7065 | for (unsigned j = 0; j < NumElts; ++j) { |
| 7066 | if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult) |
| 7067 | return false; |
| 7068 | } |
| 7069 | } |
| 7070 | |
| 7071 | if (M.size() == NumElts*2) |
| 7072 | WhichResult = 0; |
| 7073 | |
| 7074 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7075 | if (VT.is64BitVector() && EltSz == 32) |
| 7076 | return false; |
| 7077 | |
| 7078 | return true; |
| 7079 | } |
| 7080 | |
| 7081 | /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of |
| 7082 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
| 7084 | static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 7085 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7086 | if (EltSz == 64) |
| 7087 | return false; |
| 7088 | |
| 7089 | unsigned NumElts = VT.getVectorNumElements(); |
| 7090 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7091 | return false; |
| 7092 | |
| 7093 | unsigned Half = NumElts / 2; |
| 7094 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7095 | WhichResult = SelectPairHalf(NumElts, M, i); |
| 7096 | for (unsigned j = 0; j < NumElts; j += Half) { |
| 7097 | unsigned Idx = WhichResult; |
| 7098 | for (unsigned k = 0; k < Half; ++k) { |
| 7099 | int MIdx = M[i + j + k]; |
| 7100 | if (MIdx >= 0 && (unsigned) MIdx != Idx) |
| 7101 | return false; |
| 7102 | Idx += 2; |
| 7103 | } |
| 7104 | } |
| 7105 | } |
| 7106 | |
| 7107 | if (M.size() == NumElts*2) |
| 7108 | WhichResult = 0; |
| 7109 | |
| 7110 | // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7111 | if (VT.is64BitVector() && EltSz == 32) |
| 7112 | return false; |
| 7113 | |
| 7114 | return true; |
| 7115 | } |
| 7116 | |
| 7117 | // Checks whether the shuffle mask represents a vector zip (VZIP) by checking |
| 7118 | // that pairs of elements of the shufflemask represent the same index in each |
| 7119 | // vector incrementing sequentially through the vectors. |
| 7120 | // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5] |
| 7121 | // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f} |
| 7122 | // v2={e,f,g,h} |
// Requires checks similar to those of isVTRNMask with respect to how the
// results are returned.
| 7125 | static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { |
| 7126 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7127 | if (EltSz == 64) |
| 7128 | return false; |
| 7129 | |
| 7130 | unsigned NumElts = VT.getVectorNumElements(); |
| 7131 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7132 | return false; |
| 7133 | |
| 7134 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7135 | WhichResult = SelectPairHalf(NumElts, M, i); |
| 7136 | unsigned Idx = WhichResult * NumElts / 2; |
| 7137 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7138 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
| 7139 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts)) |
| 7140 | return false; |
| 7141 | Idx += 1; |
| 7142 | } |
| 7143 | } |
| 7144 | |
| 7145 | if (M.size() == NumElts*2) |
| 7146 | WhichResult = 0; |
| 7147 | |
| 7148 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7149 | if (VT.is64BitVector() && EltSz == 32) |
| 7150 | return false; |
| 7151 | |
| 7152 | return true; |
| 7153 | } |
| 7154 | |
| 7155 | /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of |
| 7156 | /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". |
| 7157 | /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. |
| 7158 | static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ |
| 7159 | unsigned EltSz = VT.getScalarSizeInBits(); |
| 7160 | if (EltSz == 64) |
| 7161 | return false; |
| 7162 | |
| 7163 | unsigned NumElts = VT.getVectorNumElements(); |
| 7164 | if (M.size() != NumElts && M.size() != NumElts*2) |
| 7165 | return false; |
| 7166 | |
| 7167 | for (unsigned i = 0; i < M.size(); i += NumElts) { |
| 7168 | WhichResult = SelectPairHalf(NumElts, M, i); |
| 7169 | unsigned Idx = WhichResult * NumElts / 2; |
| 7170 | for (unsigned j = 0; j < NumElts; j += 2) { |
| 7171 | if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || |
| 7172 | (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx)) |
| 7173 | return false; |
| 7174 | Idx += 1; |
| 7175 | } |
| 7176 | } |
| 7177 | |
| 7178 | if (M.size() == NumElts*2) |
| 7179 | WhichResult = 0; |
| 7180 | |
| 7181 | // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. |
| 7182 | if (VT.is64BitVector() && EltSz == 32) |
| 7183 | return false; |
| 7184 | |
| 7185 | return true; |
| 7186 | } |
| 7187 | |
| 7188 | /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), |
| 7189 | /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't. |
| 7190 | static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT, |
| 7191 | unsigned &WhichResult, |
| 7192 | bool &isV_UNDEF) { |
| 7193 | isV_UNDEF = false; |
| 7194 | if (isVTRNMask(ShuffleMask, VT, WhichResult)) |
| 7195 | return ARMISD::VTRN; |
| 7196 | if (isVUZPMask(ShuffleMask, VT, WhichResult)) |
| 7197 | return ARMISD::VUZP; |
| 7198 | if (isVZIPMask(ShuffleMask, VT, WhichResult)) |
| 7199 | return ARMISD::VZIP; |
| 7200 | |
| 7201 | isV_UNDEF = true; |
| 7202 | if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) |
| 7203 | return ARMISD::VTRN; |
| 7204 | if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) |
| 7205 | return ARMISD::VUZP; |
| 7206 | if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) |
| 7207 | return ARMISD::VZIP; |
| 7208 | |
| 7209 | return 0; |
| 7210 | } |
| 7211 | |
/// \return true if this is a reverse operation on a vector.
| 7213 | static bool isReverseMask(ArrayRef<int> M, EVT VT) { |
| 7214 | unsigned NumElts = VT.getVectorNumElements(); |
| 7215 | // Make sure the mask has the right size. |
| 7216 | if (NumElts != M.size()) |
| 7217 | return false; |
| 7218 | |
| 7219 | // Look for <15, ..., 3, -1, 1, 0>. |
| 7220 | for (unsigned i = 0; i != NumElts; ++i) |
| 7221 | if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i)) |
| 7222 | return false; |
| 7223 | |
| 7224 | return true; |
| 7225 | } |
| 7226 | |
| 7227 | static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top) { |
| 7228 | unsigned NumElts = VT.getVectorNumElements(); |
| 7229 | // Make sure the mask has the right size. |
| 7230 | if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8)) |
| 7231 | return false; |
| 7232 | |
  // If Top
  //   Look for <0, N, 2, N+2, 4, N+4, ..>.
  //   This inserts Input2 into Input1
  // else if not Top
  //   Look for <0, N+1, 2, N+3, 4, N+5, ..>
  //   This inserts Input1 into Input2
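  // For example, for v8i16 (N = 8) this matches <0, 8, 2, 10, 4, 12, 6, 14>
  // when Top is set and <0, 9, 2, 11, 4, 13, 6, 15> when it is not.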
| 7239 | unsigned Offset = Top ? 0 : 1; |
| 7240 | for (unsigned i = 0; i < NumElts; i+=2) { |
| 7241 | if (M[i] >= 0 && M[i] != (int)i) |
| 7242 | return false; |
| 7243 | if (M[i+1] >= 0 && M[i+1] != (int)(NumElts + i + Offset)) |
| 7244 | return false; |
| 7245 | } |
| 7246 | |
| 7247 | return true; |
| 7248 | } |
| 7249 | |
| 7250 | // Reconstruct an MVE VCVT from a BuildVector of scalar fptrunc, all extracted |
| 7251 | // from a pair of inputs. For example: |
| 7252 | // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0), |
| 7253 | // FP_ROUND(EXTRACT_ELT(Y, 0), |
| 7254 | // FP_ROUND(EXTRACT_ELT(X, 1), |
| 7255 | // FP_ROUND(EXTRACT_ELT(Y, 1), ...) |
| 7256 | static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG, |
| 7257 | const ARMSubtarget *ST) { |
  assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
| 7259 | if (!ST->hasMVEFloatOps()) |
| 7260 | return SDValue(); |
| 7261 | |
| 7262 | SDLoc dl(BV); |
| 7263 | EVT VT = BV.getValueType(); |
| 7264 | if (VT != MVT::v8f16) |
| 7265 | return SDValue(); |
| 7266 | |
| 7267 | // We are looking for a buildvector of fptrunc elements, where all the |
| 7268 | // elements are interleavingly extracted from two sources. Check the first two |
| 7269 | // items are valid enough and extract some info from them (they are checked |
| 7270 | // properly in the loop below). |
| 7271 | if (BV.getOperand(0).getOpcode() != ISD::FP_ROUND || |
| 7272 | BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 7273 | BV.getOperand(0).getOperand(0).getConstantOperandVal(1) != 0) |
| 7274 | return SDValue(); |
| 7275 | if (BV.getOperand(1).getOpcode() != ISD::FP_ROUND || |
| 7276 | BV.getOperand(1).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
| 7277 | BV.getOperand(1).getOperand(0).getConstantOperandVal(1) != 0) |
| 7278 | return SDValue(); |
| 7279 | SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0); |
| 7280 | SDValue Op1 = BV.getOperand(1).getOperand(0).getOperand(0); |
| 7281 | if (Op0.getValueType() != MVT::v4f32 || Op1.getValueType() != MVT::v4f32) |
| 7282 | return SDValue(); |
| 7283 | |
| 7284 | // Check all the values in the BuildVector line up with our expectations. |
| 7285 | for (unsigned i = 1; i < 4; i++) { |
| 7286 | auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { |
| 7287 | return Trunc.getOpcode() == ISD::FP_ROUND && |
| 7288 | Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 7289 | Trunc.getOperand(0).getOperand(0) == Op && |
| 7290 | Trunc.getOperand(0).getConstantOperandVal(1) == Idx; |
| 7291 | }; |
| 7292 | if (!Check(BV.getOperand(i * 2 + 0), Op0, i)) |
| 7293 | return SDValue(); |
| 7294 | if (!Check(BV.getOperand(i * 2 + 1), Op1, i)) |
| 7295 | return SDValue(); |
| 7296 | } |
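  // The mask matches: emit a pair of VCVTNs. ARMISD::VCVTN narrows its v4f32
  // second operand into either the even (last operand 0) or odd (last operand
  // 1) f16 lanes of its first operand, so chaining the two interleaves the
  // converted lanes of Op0 and Op1.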
| 7297 | |
| 7298 | SDValue N1 = DAG.getNode(ARMISD::VCVTN, dl, VT, DAG.getUNDEF(VT), Op0, |
| 7299 | DAG.getConstant(0, dl, MVT::i32)); |
| 7300 | return DAG.getNode(ARMISD::VCVTN, dl, VT, N1, Op1, |
| 7301 | DAG.getConstant(1, dl, MVT::i32)); |
| 7302 | } |
| 7303 | |
| 7304 | // Reconstruct an MVE VCVT from a BuildVector of scalar fpext, all extracted |
| 7305 | // from a single input on alternating lanes. For example: |
| 7306 | // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0), |
| 7307 | // FP_ROUND(EXTRACT_ELT(X, 2), |
| 7308 | // FP_ROUND(EXTRACT_ELT(X, 4), ...) |
| 7309 | static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG, |
| 7310 | const ARMSubtarget *ST) { |
  assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
| 7312 | if (!ST->hasMVEFloatOps()) |
| 7313 | return SDValue(); |
| 7314 | |
| 7315 | SDLoc dl(BV); |
| 7316 | EVT VT = BV.getValueType(); |
| 7317 | if (VT != MVT::v4f32) |
| 7318 | return SDValue(); |
| 7319 | |
  // We are looking for a buildvector of fpext elements, where all the
| 7321 | // elements are alternating lanes from a single source. For example <0,2,4,6> |
| 7322 | // or <1,3,5,7>. Check the first two items are valid enough and extract some |
| 7323 | // info from them (they are checked properly in the loop below). |
| 7324 | if (BV.getOperand(0).getOpcode() != ISD::FP_EXTEND || |
| 7325 | BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 7326 | return SDValue(); |
| 7327 | SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0); |
| 7328 | int Offset = BV.getOperand(0).getOperand(0).getConstantOperandVal(1); |
| 7329 | if (Op0.getValueType() != MVT::v8f16 || (Offset != 0 && Offset != 1)) |
| 7330 | return SDValue(); |
| 7331 | |
| 7332 | // Check all the values in the BuildVector line up with our expectations. |
| 7333 | for (unsigned i = 1; i < 4; i++) { |
| 7334 | auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) { |
| 7335 | return Trunc.getOpcode() == ISD::FP_EXTEND && |
| 7336 | Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 7337 | Trunc.getOperand(0).getOperand(0) == Op && |
| 7338 | Trunc.getOperand(0).getConstantOperandVal(1) == Idx; |
| 7339 | }; |
| 7340 | if (!Check(BV.getOperand(i), Op0, 2 * i + Offset)) |
| 7341 | return SDValue(); |
| 7342 | } |
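  // ARMISD::VCVTL is the widening counterpart, extending either the even
  // (Offset 0) or odd (Offset 1) f16 lanes of Op0 into a full v4f32.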
| 7343 | |
| 7344 | return DAG.getNode(ARMISD::VCVTL, dl, VT, Op0, |
| 7345 | DAG.getConstant(Offset, dl, MVT::i32)); |
| 7346 | } |
| 7347 | |
| 7348 | // If N is an integer constant that can be moved into a register in one |
| 7349 | // instruction, return an SDValue of such a constant (will become a MOV |
| 7350 | // instruction). Otherwise return null. |
| 7351 | static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, |
| 7352 | const ARMSubtarget *ST, const SDLoc &dl) { |
| 7353 | uint64_t Val; |
| 7354 | if (!isa<ConstantSDNode>(N)) |
| 7355 | return SDValue(); |
| 7356 | Val = cast<ConstantSDNode>(N)->getZExtValue(); |
| 7357 | |
| 7358 | if (ST->isThumb1Only()) { |
| 7359 | if (Val <= 255 || ~Val <= 255) |
| 7360 | return DAG.getConstant(Val, dl, MVT::i32); |
| 7361 | } else { |
| 7362 | if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) |
| 7363 | return DAG.getConstant(Val, dl, MVT::i32); |
| 7364 | } |
| 7365 | return SDValue(); |
| 7366 | } |
| 7367 | |
| 7368 | static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG, |
| 7369 | const ARMSubtarget *ST) { |
| 7370 | SDLoc dl(Op); |
| 7371 | EVT VT = Op.getValueType(); |
| 7372 | |
  assert(ST->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!");
| 7374 | |
| 7375 | unsigned NumElts = VT.getVectorNumElements(); |
| 7376 | unsigned BoolMask; |
| 7377 | unsigned BitsPerBool; |
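  // The MVE predicate register is 16 bits wide, so each v4i1 lane occupies 4
  // bits, each v8i1 lane 2 bits and each v16i1 lane a single bit. For
  // example, BUILD_VECTOR(1, 0, 1, 1) of v4i1 becomes the i32 immediate
  // 0xff0f below.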
| 7378 | if (NumElts == 4) { |
| 7379 | BitsPerBool = 4; |
| 7380 | BoolMask = 0xf; |
| 7381 | } else if (NumElts == 8) { |
| 7382 | BitsPerBool = 2; |
| 7383 | BoolMask = 0x3; |
| 7384 | } else if (NumElts == 16) { |
| 7385 | BitsPerBool = 1; |
| 7386 | BoolMask = 0x1; |
| 7387 | } else |
| 7388 | return SDValue(); |
| 7389 | |
  // If this is a single value copied into all lanes (a splat), we can just
  // sign-extend that single value.
| 7392 | SDValue FirstOp = Op.getOperand(0); |
| 7393 | if (!isa<ConstantSDNode>(FirstOp) && |
| 7394 | std::all_of(std::next(Op->op_begin()), Op->op_end(), |
| 7395 | [&FirstOp](SDUse &U) { |
| 7396 | return U.get().isUndef() || U.get() == FirstOp; |
| 7397 | })) { |
| 7398 | SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, FirstOp, |
| 7399 | DAG.getValueType(MVT::i1)); |
| 7400 | return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), Ext); |
| 7401 | } |
| 7402 | |
| 7403 | // First create base with bits set where known |
| 7404 | unsigned Bits32 = 0; |
| 7405 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7406 | SDValue V = Op.getOperand(i); |
| 7407 | if (!isa<ConstantSDNode>(V) && !V.isUndef()) |
| 7408 | continue; |
| 7409 | bool BitSet = V.isUndef() ? false : cast<ConstantSDNode>(V)->getZExtValue(); |
| 7410 | if (BitSet) |
| 7411 | Bits32 |= BoolMask << (i * BitsPerBool); |
| 7412 | } |
| 7413 | |
| 7414 | // Add in unknown nodes |
| 7415 | SDValue Base = DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, |
| 7416 | DAG.getConstant(Bits32, dl, MVT::i32)); |
| 7417 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7418 | SDValue V = Op.getOperand(i); |
| 7419 | if (isa<ConstantSDNode>(V) || V.isUndef()) |
| 7420 | continue; |
| 7421 | Base = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Base, V, |
| 7422 | DAG.getConstant(i, dl, MVT::i32)); |
| 7423 | } |
| 7424 | |
| 7425 | return Base; |
| 7426 | } |
| 7427 | |
| 7428 | // If this is a case we can't handle, return null and let the default |
| 7429 | // expansion code take care of it. |
| 7430 | SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, |
| 7431 | const ARMSubtarget *ST) const { |
| 7432 | BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); |
| 7433 | SDLoc dl(Op); |
| 7434 | EVT VT = Op.getValueType(); |
| 7435 | |
| 7436 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
| 7437 | return LowerBUILD_VECTOR_i1(Op, DAG, ST); |
| 7438 | |
| 7439 | APInt SplatBits, SplatUndef; |
| 7440 | unsigned SplatBitSize; |
| 7441 | bool HasAnyUndefs; |
| 7442 | if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 7443 | if (SplatUndef.isAllOnesValue()) |
| 7444 | return DAG.getUNDEF(VT); |
| 7445 | |
| 7446 | if ((ST->hasNEON() && SplatBitSize <= 64) || |
| 7447 | (ST->hasMVEIntegerOps() && SplatBitSize <= 64)) { |
| 7448 | // Check if an immediate VMOV works. |
| 7449 | EVT VmovVT; |
| 7450 | SDValue Val = |
| 7451 | isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(), |
| 7452 | SplatBitSize, DAG, dl, VmovVT, VT, VMOVModImm); |
| 7453 | |
| 7454 | if (Val.getNode()) { |
| 7455 | SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); |
| 7456 | return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); |
| 7457 | } |
| 7458 | |
| 7459 | // Try an immediate VMVN. |
| 7460 | uint64_t NegatedImm = (~SplatBits).getZExtValue(); |
| 7461 | Val = isVMOVModifiedImm( |
| 7462 | NegatedImm, SplatUndef.getZExtValue(), SplatBitSize, DAG, dl, VmovVT, |
| 7463 | VT, ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm); |
| 7464 | if (Val.getNode()) { |
| 7465 | SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); |
| 7466 | return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); |
| 7467 | } |
| 7468 | |
| 7469 | // Use vmov.f32 to materialize other v2f32 and v4f32 splats. |
| 7470 | if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { |
| 7471 | int ImmVal = ARM_AM::getFP32Imm(SplatBits); |
| 7472 | if (ImmVal != -1) { |
| 7473 | SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32); |
| 7474 | return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val); |
| 7475 | } |
| 7476 | } |
| 7477 | } |
| 7478 | } |
| 7479 | |
| 7480 | // Scan through the operands to see if only one value is used. |
| 7481 | // |
| 7482 | // As an optimisation, even if more than one value is used it may be more |
| 7483 | // profitable to splat with one value then change some lanes. |
| 7484 | // |
| 7485 | // Heuristically we decide to do this if the vector has a "dominant" value, |
| 7486 | // defined as splatted to more than half of the lanes. |
| 7487 | unsigned NumElts = VT.getVectorNumElements(); |
| 7488 | bool isOnlyLowElement = true; |
| 7489 | bool usesOnlyOneValue = true; |
| 7490 | bool hasDominantValue = false; |
| 7491 | bool isConstant = true; |
| 7492 | |
| 7493 | // Map of the number of times a particular SDValue appears in the |
| 7494 | // element list. |
| 7495 | DenseMap<SDValue, unsigned> ValueCounts; |
| 7496 | SDValue Value; |
| 7497 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7498 | SDValue V = Op.getOperand(i); |
| 7499 | if (V.isUndef()) |
| 7500 | continue; |
| 7501 | if (i > 0) |
| 7502 | isOnlyLowElement = false; |
| 7503 | if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) |
| 7504 | isConstant = false; |
| 7505 | |
| 7506 | ValueCounts.insert(std::make_pair(V, 0)); |
| 7507 | unsigned &Count = ValueCounts[V]; |
| 7508 | |
| 7509 | // Is this value dominant? (takes up more than half of the lanes) |
| 7510 | if (++Count > (NumElts / 2)) { |
| 7511 | hasDominantValue = true; |
| 7512 | Value = V; |
| 7513 | } |
| 7514 | } |
| 7515 | if (ValueCounts.size() != 1) |
| 7516 | usesOnlyOneValue = false; |
| 7517 | if (!Value.getNode() && !ValueCounts.empty()) |
| 7518 | Value = ValueCounts.begin()->first; |
| 7519 | |
| 7520 | if (ValueCounts.empty()) |
| 7521 | return DAG.getUNDEF(VT); |
| 7522 | |
| 7523 | // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. |
| 7524 | // Keep going if we are hitting this case. |
| 7525 | if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode())) |
| 7526 | return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); |
| 7527 | |
| 7528 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 7529 | |
| 7530 | // Use VDUP for non-constant splats. For f32 constant splats, reduce to |
| 7531 | // i32 and try again. |
| 7532 | if (hasDominantValue && EltSize <= 32) { |
| 7533 | if (!isConstant) { |
| 7534 | SDValue N; |
| 7535 | |
| 7536 | // If we are VDUPing a value that comes directly from a vector, that will |
| 7537 | // cause an unnecessary move to and from a GPR, where instead we could |
| 7538 | // just use VDUPLANE. We can only do this if the lane being extracted |
| 7539 | // is at a constant index, as the VDUP from lane instructions only have |
| 7540 | // constant-index forms. |
| 7541 | ConstantSDNode *constIndex; |
| 7542 | if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 7543 | (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) { |
| 7544 | // We need to create a new undef vector to use for the VDUPLANE if the |
| 7545 | // size of the vector from which we get the value is different than the |
| 7546 | // size of the vector that we need to create. We will insert the element |
| 7547 | // such that the register coalescer will remove unnecessary copies. |
| 7548 | if (VT != Value->getOperand(0).getValueType()) { |
| 7549 | unsigned index = constIndex->getAPIntValue().getLimitedValue() % |
| 7550 | VT.getVectorNumElements(); |
| 7551 | N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, |
| 7552 | DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT), |
| 7553 | Value, DAG.getConstant(index, dl, MVT::i32)), |
| 7554 | DAG.getConstant(index, dl, MVT::i32)); |
| 7555 | } else |
| 7556 | N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, |
| 7557 | Value->getOperand(0), Value->getOperand(1)); |
| 7558 | } else |
| 7559 | N = DAG.getNode(ARMISD::VDUP, dl, VT, Value); |
| 7560 | |
| 7561 | if (!usesOnlyOneValue) { |
| 7562 | // The dominant value was splatted as 'N', but we now have to insert |
| 7563 | // all differing elements. |
| 7564 | for (unsigned I = 0; I < NumElts; ++I) { |
| 7565 | if (Op.getOperand(I) == Value) |
| 7566 | continue; |
| 7567 | SmallVector<SDValue, 3> Ops; |
| 7568 | Ops.push_back(N); |
| 7569 | Ops.push_back(Op.getOperand(I)); |
| 7570 | Ops.push_back(DAG.getConstant(I, dl, MVT::i32)); |
| 7571 | N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops); |
| 7572 | } |
| 7573 | } |
| 7574 | return N; |
| 7575 | } |
| 7576 | if (VT.getVectorElementType().isFloatingPoint()) { |
| 7577 | SmallVector<SDValue, 8> Ops; |
| 7578 | MVT FVT = VT.getVectorElementType().getSimpleVT(); |
| 7579 | assert(FVT == MVT::f32 || FVT == MVT::f16); |
| 7580 | MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16; |
| 7581 | for (unsigned i = 0; i < NumElts; ++i) |
| 7582 | Ops.push_back(DAG.getNode(ISD::BITCAST, dl, IVT, |
| 7583 | Op.getOperand(i))); |
| 7584 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), IVT, NumElts); |
| 7585 | SDValue Val = DAG.getBuildVector(VecVT, dl, Ops); |
| 7586 | Val = LowerBUILD_VECTOR(Val, DAG, ST); |
| 7587 | if (Val.getNode()) |
| 7588 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); |
| 7589 | } |
| 7590 | if (usesOnlyOneValue) { |
| 7591 | SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); |
| 7592 | if (isConstant && Val.getNode()) |
| 7593 | return DAG.getNode(ARMISD::VDUP, dl, VT, Val); |
| 7594 | } |
| 7595 | } |
| 7596 | |
| 7597 | // If all elements are constants and the case above didn't get hit, fall back |
| 7598 | // to the default expansion, which will generate a load from the constant |
| 7599 | // pool. |
| 7600 | if (isConstant) |
| 7601 | return SDValue(); |
| 7602 | |
| 7603 | // Reconstruct the BUILDVECTOR to one of the legal shuffles (such as vext and |
| 7604 | // vmovn). Empirical tests suggest this is rarely worth it for vectors of |
| 7605 | // length <= 2. |
| 7606 | if (NumElts >= 4) |
| 7607 | if (SDValue shuffle = ReconstructShuffle(Op, DAG)) |
| 7608 | return shuffle; |
| 7609 | |
| 7610 | // Attempt to turn a buildvector of scalar fptrunc's or fpext's back into |
| 7611 | // VCVT's |
| 7612 | if (SDValue VCVT = LowerBuildVectorOfFPTrunc(Op, DAG, Subtarget)) |
| 7613 | return VCVT; |
| 7614 | if (SDValue VCVT = LowerBuildVectorOfFPExt(Op, DAG, Subtarget)) |
| 7615 | return VCVT; |
| 7616 | |
  if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 &&
      VT != MVT::v4f32) {
| 7618 | // If we haven't found an efficient lowering, try splitting a 128-bit vector |
| 7619 | // into two 64-bit vectors; we might discover a better way to lower it. |
| 7620 | SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts); |
| 7621 | EVT ExtVT = VT.getVectorElementType(); |
| 7622 | EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2); |
| 7623 | SDValue Lower = |
| 7624 | DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2)); |
| 7625 | if (Lower.getOpcode() == ISD::BUILD_VECTOR) |
| 7626 | Lower = LowerBUILD_VECTOR(Lower, DAG, ST); |
| 7627 | SDValue Upper = DAG.getBuildVector( |
| 7628 | HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2)); |
| 7629 | if (Upper.getOpcode() == ISD::BUILD_VECTOR) |
| 7630 | Upper = LowerBUILD_VECTOR(Upper, DAG, ST); |
| 7631 | if (Lower && Upper) |
| 7632 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper); |
| 7633 | } |
| 7634 | |
| 7635 | // Vectors with 32- or 64-bit elements can be built by directly assigning |
| 7636 | // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands |
| 7637 | // will be legalized. |
| 7638 | if (EltSize >= 32) { |
| 7639 | // Do the expansion with floating-point types, since that is what the VFP |
| 7640 | // registers are defined to use, and since i64 is not legal. |
| 7641 | EVT EltVT = EVT::getFloatingPointVT(EltSize); |
| 7642 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); |
| 7643 | SmallVector<SDValue, 8> Ops; |
| 7644 | for (unsigned i = 0; i < NumElts; ++i) |
| 7645 | Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); |
| 7646 | SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); |
| 7647 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); |
| 7648 | } |
| 7649 | |
| 7650 | // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we |
| 7651 | // know the default expansion would otherwise fall back on something even |
| 7652 | // worse. For a vector with one or two non-undef values, that's |
| 7653 | // scalar_to_vector for the elements followed by a shuffle (provided the |
| 7654 | // shuffle is valid for the target) and materialization element by element |
| 7655 | // on the stack followed by a load for everything else. |
| 7656 | if (!isConstant && !usesOnlyOneValue) { |
| 7657 | SDValue Vec = DAG.getUNDEF(VT); |
| 7658 | for (unsigned i = 0 ; i < NumElts; ++i) { |
| 7659 | SDValue V = Op.getOperand(i); |
| 7660 | if (V.isUndef()) |
| 7661 | continue; |
| 7662 | SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32); |
| 7663 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); |
| 7664 | } |
| 7665 | return Vec; |
| 7666 | } |
| 7667 | |
| 7668 | return SDValue(); |
| 7669 | } |
| 7670 | |
| 7671 | // Gather data to see if the operation can be modelled as a |
| 7672 | // shuffle in combination with VEXTs. |
| 7673 | SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, |
| 7674 | SelectionDAG &DAG) const { |
  assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
| 7676 | SDLoc dl(Op); |
| 7677 | EVT VT = Op.getValueType(); |
| 7678 | unsigned NumElts = VT.getVectorNumElements(); |
| 7679 | |
| 7680 | struct ShuffleSourceInfo { |
| 7681 | SDValue Vec; |
| 7682 | unsigned MinElt = std::numeric_limits<unsigned>::max(); |
| 7683 | unsigned MaxElt = 0; |
| 7684 | |
| 7685 | // We may insert some combination of BITCASTs and VEXT nodes to force Vec to |
| 7686 | // be compatible with the shuffle we intend to construct. As a result |
| 7687 | // ShuffleVec will be some sliding window into the original Vec. |
| 7688 | SDValue ShuffleVec; |
| 7689 | |
    // Code should guarantee that element i in Vec starts at element
    // "WindowBase + i * WindowScale" in ShuffleVec.
| 7692 | int WindowBase = 0; |
| 7693 | int WindowScale = 1; |
| 7694 | |
| 7695 | ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {} |
| 7696 | |
| 7697 | bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } |
| 7698 | }; |
| 7699 | |
| 7700 | // First gather all vectors used as an immediate source for this BUILD_VECTOR |
| 7701 | // node. |
| 7702 | SmallVector<ShuffleSourceInfo, 2> Sources; |
| 7703 | for (unsigned i = 0; i < NumElts; ++i) { |
| 7704 | SDValue V = Op.getOperand(i); |
| 7705 | if (V.isUndef()) |
| 7706 | continue; |
| 7707 | else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { |
| 7708 | // A shuffle can only come from building a vector from various |
| 7709 | // elements of other vectors. |
| 7710 | return SDValue(); |
| 7711 | } else if (!isa<ConstantSDNode>(V.getOperand(1))) { |
| 7712 | // Furthermore, shuffles require a constant mask, whereas extractelts |
| 7713 | // accept variable indices. |
| 7714 | return SDValue(); |
| 7715 | } |
| 7716 | |
| 7717 | // Add this element source to the list if it's not already there. |
| 7718 | SDValue SourceVec = V.getOperand(0); |
| 7719 | auto Source = llvm::find(Sources, SourceVec); |
| 7720 | if (Source == Sources.end()) |
| 7721 | Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec)); |
| 7722 | |
| 7723 | // Update the minimum and maximum lane number seen. |
| 7724 | unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); |
| 7725 | Source->MinElt = std::min(Source->MinElt, EltNo); |
| 7726 | Source->MaxElt = std::max(Source->MaxElt, EltNo); |
| 7727 | } |
| 7728 | |
| 7729 | // Currently only do something sane when at most two source vectors |
| 7730 | // are involved. |
| 7731 | if (Sources.size() > 2) |
| 7732 | return SDValue(); |
| 7733 | |
| 7734 | // Find out the smallest element size among result and two sources, and use |
| 7735 | // it as element size to build the shuffle_vector. |
| 7736 | EVT SmallestEltTy = VT.getVectorElementType(); |
| 7737 | for (auto &Source : Sources) { |
| 7738 | EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); |
| 7739 | if (SrcEltTy.bitsLT(SmallestEltTy)) |
| 7740 | SmallestEltTy = SrcEltTy; |
| 7741 | } |
| 7742 | unsigned ResMultiplier = |
| 7743 | VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 7744 | NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 7745 | EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts); |
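  // For example, a v4i32 BUILD_VECTOR fed from a v8i16 source gives
  // SmallestEltTy == i16, ResMultiplier == 2 and ShuffleVT == v8i16; each
  // result element then occupies two lanes of the shuffle mask.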
| 7746 | |
| 7747 | // If the source vector is too wide or too narrow, we may nevertheless be able |
| 7748 | // to construct a compatible shuffle either by concatenating it with UNDEF or |
| 7749 | // extracting a suitable range of elements. |
| 7750 | for (auto &Src : Sources) { |
| 7751 | EVT SrcVT = Src.ShuffleVec.getValueType(); |
| 7752 | |
| 7753 | uint64_t SrcVTSize = SrcVT.getFixedSizeInBits(); |
| 7754 | uint64_t VTSize = VT.getFixedSizeInBits(); |
| 7755 | if (SrcVTSize == VTSize) |
| 7756 | continue; |
| 7757 | |
| 7758 | // This stage of the search produces a source with the same element type as |
| 7759 | // the original, but with a total width matching the BUILD_VECTOR output. |
| 7760 | EVT EltVT = SrcVT.getVectorElementType(); |
| 7761 | unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits(); |
| 7762 | EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts); |
| 7763 | |
| 7764 | if (SrcVTSize < VTSize) { |
| 7765 | if (2 * SrcVTSize != VTSize) |
| 7766 | return SDValue(); |
| 7767 | // We can pad out the smaller vector for free, so if it's part of a |
| 7768 | // shuffle... |
| 7769 | Src.ShuffleVec = |
| 7770 | DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec, |
| 7771 | DAG.getUNDEF(Src.ShuffleVec.getValueType())); |
| 7772 | continue; |
| 7773 | } |
| 7774 | |
| 7775 | if (SrcVTSize != 2 * VTSize) |
| 7776 | return SDValue(); |
| 7777 | |
| 7778 | if (Src.MaxElt - Src.MinElt >= NumSrcElts) { |
| 7779 | // Span too large for a VEXT to cope |
| 7780 | return SDValue(); |
| 7781 | } |
| 7782 | |
| 7783 | if (Src.MinElt >= NumSrcElts) { |
| 7784 | // The extraction can just take the second half |
| 7785 | Src.ShuffleVec = |
| 7786 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, |
| 7787 | DAG.getConstant(NumSrcElts, dl, MVT::i32)); |
| 7788 | Src.WindowBase = -NumSrcElts; |
| 7789 | } else if (Src.MaxElt < NumSrcElts) { |
| 7790 | // The extraction can just take the first half |
| 7791 | Src.ShuffleVec = |
| 7792 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, |
| 7793 | DAG.getConstant(0, dl, MVT::i32)); |
| 7794 | } else { |
| 7795 | // An actual VEXT is needed |
| 7796 | SDValue VEXTSrc1 = |
| 7797 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, |
| 7798 | DAG.getConstant(0, dl, MVT::i32)); |
| 7799 | SDValue VEXTSrc2 = |
| 7800 | DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, |
| 7801 | DAG.getConstant(NumSrcElts, dl, MVT::i32)); |
| 7802 | |
| 7803 | Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1, |
| 7804 | VEXTSrc2, |
| 7805 | DAG.getConstant(Src.MinElt, dl, MVT::i32)); |
| 7806 | Src.WindowBase = -Src.MinElt; |
| 7807 | } |
| 7808 | } |
| 7809 | |
| 7810 | // Another possible incompatibility occurs from the vector element types. We |
| 7811 | // can fix this by bitcasting the source vectors to the same type we intend |
| 7812 | // for the shuffle. |
| 7813 | for (auto &Src : Sources) { |
| 7814 | EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); |
| 7815 | if (SrcEltTy == SmallestEltTy) |
| 7816 | continue; |
| 7817 | assert(ShuffleVT.getVectorElementType() == SmallestEltTy); |
    Src.ShuffleVec =
        DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, ShuffleVT, Src.ShuffleVec);
| 7819 | Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits(); |
| 7820 | Src.WindowBase *= Src.WindowScale; |
| 7821 | } |
| 7822 | |
| 7823 | // Final sanity check before we try to actually produce a shuffle. |
  LLVM_DEBUG(for (auto Src : Sources)
                 assert(Src.ShuffleVec.getValueType() == ShuffleVT););
| 7827 | |
| 7828 | // The stars all align, our next step is to produce the mask for the shuffle. |
| 7829 | SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); |
| 7830 | int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits(); |
| 7831 | for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { |
| 7832 | SDValue Entry = Op.getOperand(i); |
| 7833 | if (Entry.isUndef()) |
| 7834 | continue; |
| 7835 | |
| 7836 | auto Src = llvm::find(Sources, Entry.getOperand(0)); |
| 7837 | int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue(); |
| 7838 | |
| 7839 | // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit |
| 7840 | // trunc. So only std::min(SrcBits, DestBits) actually get defined in this |
| 7841 | // segment. |
| 7842 | EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType(); |
| 7843 | int BitsDefined = std::min(OrigEltTy.getScalarSizeInBits(), |
| 7844 | VT.getScalarSizeInBits()); |
| 7845 | int LanesDefined = BitsDefined / BitsPerShuffleLane; |
| 7846 | |
| 7847 | // This source is expected to fill ResMultiplier lanes of the final shuffle, |
| 7848 | // starting at the appropriate offset. |
| 7849 | int *LaneMask = &Mask[i * ResMultiplier]; |
| 7850 | |
    int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
| 7852 | ExtractBase += NumElts * (Src - Sources.begin()); |
| 7853 | for (int j = 0; j < LanesDefined; ++j) |
| 7854 | LaneMask[j] = ExtractBase + j; |
| 7855 | } |
| 7856 | |
| 7858 | // We can't handle more than two sources. This should have already |
| 7859 | // been checked before this point. |
  assert(Sources.size() <= 2 && "Too many sources!");
| 7861 | |
| 7862 | SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) }; |
| 7863 | for (unsigned i = 0; i < Sources.size(); ++i) |
| 7864 | ShuffleOps[i] = Sources[i].ShuffleVec; |
| 7865 | |
| 7866 | SDValue Shuffle = buildLegalVectorShuffle(ShuffleVT, dl, ShuffleOps[0], |
| 7867 | ShuffleOps[1], Mask, DAG); |
| 7868 | if (!Shuffle) |
| 7869 | return SDValue(); |
| 7870 | return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Shuffle); |
| 7871 | } |
| 7872 | |
| 7873 | enum ShuffleOpCodes { |
| 7874 | OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> |
| 7875 | OP_VREV, |
| 7876 | OP_VDUP0, |
| 7877 | OP_VDUP1, |
| 7878 | OP_VDUP2, |
| 7879 | OP_VDUP3, |
| 7880 | OP_VEXT1, |
| 7881 | OP_VEXT2, |
| 7882 | OP_VEXT3, |
| 7883 | OP_VUZPL, // VUZP, left result |
| 7884 | OP_VUZPR, // VUZP, right result |
| 7885 | OP_VZIPL, // VZIP, left result |
| 7886 | OP_VZIPR, // VZIP, right result |
| 7887 | OP_VTRNL, // VTRN, left result |
| 7888 | OP_VTRNR // VTRN, right result |
| 7889 | }; |
| 7890 | |
| 7891 | static bool isLegalMVEShuffleOp(unsigned PFEntry) { |
| 7892 | unsigned OpNum = (PFEntry >> 26) & 0x0F; |
| 7893 | switch (OpNum) { |
| 7894 | case OP_COPY: |
| 7895 | case OP_VREV: |
| 7896 | case OP_VDUP0: |
| 7897 | case OP_VDUP1: |
| 7898 | case OP_VDUP2: |
| 7899 | case OP_VDUP3: |
| 7900 | return true; |
| 7901 | } |
| 7902 | return false; |
| 7903 | } |
| 7904 | |
| 7905 | /// isShuffleMaskLegal - Targets can use this to indicate that they only |
| 7906 | /// support *some* VECTOR_SHUFFLE operations, those with specific masks. |
| 7907 | /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values |
| 7908 | /// are assumed to be legal. |
| 7909 | bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { |
| 7910 | if (VT.getVectorNumElements() == 4 && |
| 7911 | (VT.is128BitVector() || VT.is64BitVector())) { |
| 7912 | unsigned PFIndexes[4]; |
| 7913 | for (unsigned i = 0; i != 4; ++i) { |
| 7914 | if (M[i] < 0) |
| 7915 | PFIndexes[i] = 8; |
| 7916 | else |
| 7917 | PFIndexes[i] = M[i]; |
| 7918 | } |
| 7919 | |
| 7920 | // Compute the index in the perfect shuffle table. |
| 7921 | unsigned PFTableIndex = |
| 7922 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
| 7923 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
| 7924 | unsigned Cost = (PFEntry >> 30); |
| 7925 | |
| 7926 | if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry))) |
| 7927 | return true; |
| 7928 | } |
| 7929 | |
| 7930 | bool ReverseVEXT, isV_UNDEF; |
| 7931 | unsigned Imm, WhichResult; |
| 7932 | |
| 7933 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 7934 | if (EltSize >= 32 || |
| 7935 | ShuffleVectorSDNode::isSplatMask(&M[0], VT) || |
| 7936 | ShuffleVectorInst::isIdentityMask(M) || |
| 7937 | isVREVMask(M, VT, 64) || |
| 7938 | isVREVMask(M, VT, 32) || |
| 7939 | isVREVMask(M, VT, 16)) |
| 7940 | return true; |
| 7941 | else if (Subtarget->hasNEON() && |
| 7942 | (isVEXTMask(M, VT, ReverseVEXT, Imm) || |
| 7943 | isVTBLMask(M, VT) || |
| 7944 | isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF))) |
| 7945 | return true; |
| 7946 | else if (Subtarget->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) && |
| 7947 | isReverseMask(M, VT)) |
| 7948 | return true; |
| 7949 | else if (Subtarget->hasMVEIntegerOps() && |
| 7950 | (isVMOVNMask(M, VT, 0) || isVMOVNMask(M, VT, 1))) |
| 7951 | return true; |
| 7952 | else |
| 7953 | return false; |
| 7954 | } |
| 7955 | |
| 7956 | /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit |
| 7957 | /// the specified operations to build the shuffle. |
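/// Each 32-bit PFEntry packs the cost in bits [31:30], the opcode (one of the
/// ShuffleOpCodes values) in bits [29:26], and the LHS/RHS operand ids in
/// bits [25:13] and [12:0]. An operand id encodes four selected lanes as
/// base-9 digits (with 8 standing for an undef lane), so <0,1,2,3> is the
/// LHS identity copy and <4,5,6,7> the RHS identity copy.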
| 7958 | static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, |
| 7959 | SDValue RHS, SelectionDAG &DAG, |
| 7960 | const SDLoc &dl) { |
| 7961 | unsigned OpNum = (PFEntry >> 26) & 0x0F; |
| 7962 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); |
| 7963 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); |
| 7964 | |
| 7965 | if (OpNum == OP_COPY) { |
| 7966 | if (LHSID == (1*9+2)*9+3) return LHS; |
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
| 7968 | return RHS; |
| 7969 | } |
| 7970 | |
| 7971 | SDValue OpLHS, OpRHS; |
| 7972 | OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); |
| 7973 | OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); |
| 7974 | EVT VT = OpLHS.getValueType(); |
| 7975 | |
| 7976 | switch (OpNum) { |
  default: llvm_unreachable("Unknown shuffle opcode!");
| 7978 | case OP_VREV: |
| 7979 | // VREV divides the vector in half and swaps within the half. |
| 7980 | if (VT.getVectorElementType() == MVT::i32 || |
| 7981 | VT.getVectorElementType() == MVT::f32) |
| 7982 | return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); |
| 7983 | // vrev <4 x i16> -> VREV32 |
| 7984 | if (VT.getVectorElementType() == MVT::i16) |
| 7985 | return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); |
| 7986 | // vrev <4 x i8> -> VREV16 |
| 7987 | assert(VT.getVectorElementType() == MVT::i8); |
| 7988 | return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); |
| 7989 | case OP_VDUP0: |
| 7990 | case OP_VDUP1: |
| 7991 | case OP_VDUP2: |
| 7992 | case OP_VDUP3: |
| 7993 | return DAG.getNode(ARMISD::VDUPLANE, dl, VT, |
| 7994 | OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32)); |
| 7995 | case OP_VEXT1: |
| 7996 | case OP_VEXT2: |
| 7997 | case OP_VEXT3: |
| 7998 | return DAG.getNode(ARMISD::VEXT, dl, VT, |
| 7999 | OpLHS, OpRHS, |
| 8000 | DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32)); |
| 8001 | case OP_VUZPL: |
| 8002 | case OP_VUZPR: |
| 8003 | return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), |
| 8004 | OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); |
| 8005 | case OP_VZIPL: |
| 8006 | case OP_VZIPR: |
| 8007 | return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), |
| 8008 | OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); |
| 8009 | case OP_VTRNL: |
| 8010 | case OP_VTRNR: |
| 8011 | return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), |
| 8012 | OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); |
| 8013 | } |
| 8014 | } |
| 8015 | |
| 8016 | static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, |
| 8017 | ArrayRef<int> ShuffleMask, |
| 8018 | SelectionDAG &DAG) { |
| 8019 | // Check to see if we can use the VTBL instruction. |
| 8020 | SDValue V1 = Op.getOperand(0); |
| 8021 | SDValue V2 = Op.getOperand(1); |
| 8022 | SDLoc DL(Op); |
| 8023 | |
| 8024 | SmallVector<SDValue, 8> VTBLMask; |
| 8025 | for (ArrayRef<int>::iterator |
| 8026 | I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) |
| 8027 | VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32)); |
| 8028 | |
| 8029 | if (V2.getNode()->isUndef()) |
| 8030 | return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, |
| 8031 | DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); |
| 8032 | |
| 8033 | return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, |
| 8034 | DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); |
| 8035 | } |
| 8036 | |
| 8037 | static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, |
| 8038 | SelectionDAG &DAG) { |
| 8039 | SDLoc DL(Op); |
| 8040 | SDValue OpLHS = Op.getOperand(0); |
| 8041 | EVT VT = OpLHS.getValueType(); |
| 8042 | |
  assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
         "Expect a v8i16/v16i8 type");
  OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
  // For a v16i8 type: after the VREV64, we have got <7, ..., 0, 15, ..., 8>,
  // i.e. each double word is reversed in place. Now extract the first 8 bytes
  // into the top double word and the last 8 bytes into the bottom double
  // word. The v8i16 case is similar.
  unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
| 8050 | return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS, |
| 8051 | DAG.getConstant(ExtractNum, DL, MVT::i32)); |
| 8052 | } |
| 8053 | |
| 8054 | static EVT getVectorTyFromPredicateVector(EVT VT) { |
| 8055 | switch (VT.getSimpleVT().SimpleTy) { |
| 8056 | case MVT::v4i1: |
| 8057 | return MVT::v4i32; |
| 8058 | case MVT::v8i1: |
| 8059 | return MVT::v8i16; |
| 8060 | case MVT::v16i1: |
| 8061 | return MVT::v16i8; |
| 8062 | default: |
| 8063 | llvm_unreachable("Unexpected vector predicate type" ); |
| 8064 | } |
| 8065 | } |
| 8066 | |
| 8067 | static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT, |
| 8068 | SelectionDAG &DAG) { |
| 8069 | // Converting from boolean predicates to integers involves creating a vector |
| 8070 | // of all ones or all zeroes and selecting the lanes based upon the real |
| 8071 | // predicate. |
| 8072 | SDValue AllOnes = |
| 8073 | DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff), dl, MVT::i32); |
| 8074 | AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllOnes); |
| 8075 | |
| 8076 | SDValue AllZeroes = |
| 8077 | DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0x0), dl, MVT::i32); |
| 8078 | AllZeroes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllZeroes); |
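  // Both immediates use cmode 0xe, the modified-immediate encoding that
  // splats an 8-bit value across every byte lane of the vector.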
| 8079 | |
| 8080 | // Get full vector type from predicate type |
| 8081 | EVT NewVT = getVectorTyFromPredicateVector(VT); |
| 8082 | |
| 8083 | SDValue RecastV1; |
| 8084 | // If the real predicate is an v8i1 or v4i1 (not v16i1) then we need to recast |
| 8085 | // this to a v16i1. This cannot be done with an ordinary bitcast because the |
| 8086 | // sizes are not the same. We have to use a MVE specific PREDICATE_CAST node, |
| 8087 | // since we know in hardware the sizes are really the same. |
| 8088 | if (VT != MVT::v16i1) |
| 8089 | RecastV1 = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Pred); |
| 8090 | else |
| 8091 | RecastV1 = Pred; |
| 8092 | |
| 8093 | // Select either all ones or zeroes depending upon the real predicate bits. |
| 8094 | SDValue PredAsVector = |
| 8095 | DAG.getNode(ISD::VSELECT, dl, MVT::v16i8, RecastV1, AllOnes, AllZeroes); |
| 8096 | |
| 8097 | // Recast our new predicate-as-integer v16i8 vector into something |
| 8098 | // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate. |
| 8099 | return DAG.getNode(ISD::BITCAST, dl, NewVT, PredAsVector); |
| 8100 | } |
| 8101 | |
| 8102 | static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG, |
| 8103 | const ARMSubtarget *ST) { |
| 8104 | EVT VT = Op.getValueType(); |
| 8105 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); |
| 8106 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
| 8107 | |
| 8108 | assert(ST->hasMVEIntegerOps() && |
| 8109 | "No support for vector shuffle of boolean predicates" ); |
| 8110 | |
| 8111 | SDValue V1 = Op.getOperand(0); |
| 8112 | SDLoc dl(Op); |
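  // A reversed predicate is just the 16 predicate bits in reverse order.
  // BITREVERSE operates on the full 32-bit GPR and leaves the result in the
  // top half, so shift it back down by 16 afterwards.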
| 8113 | if (isReverseMask(ShuffleMask, VT)) { |
| 8114 | SDValue cast = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, V1); |
| 8115 | SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, cast); |
| 8116 | SDValue srl = DAG.getNode(ISD::SRL, dl, MVT::i32, rbit, |
| 8117 | DAG.getConstant(16, dl, MVT::i32)); |
| 8118 | return DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, srl); |
| 8119 | } |
| 8120 | |
| 8121 | // Until we can come up with optimised cases for every single vector |
| 8122 | // shuffle in existence we have chosen the least painful strategy. This is |
  // to essentially promote the boolean predicate to an 8-bit integer, where
| 8124 | // each predicate represents a byte. Then we fall back on a normal integer |
| 8125 | // vector shuffle and convert the result back into a predicate vector. In |
| 8126 | // many cases the generated code might be even better than scalar code |
| 8127 | // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit |
| 8128 | // fields in a register into 8 other arbitrary 2-bit fields! |
| 8129 | SDValue PredAsVector = PromoteMVEPredVector(dl, V1, VT, DAG); |
| 8130 | EVT NewVT = PredAsVector.getValueType(); |
| 8131 | |
| 8132 | // Do the shuffle! |
| 8133 | SDValue Shuffled = DAG.getVectorShuffle(NewVT, dl, PredAsVector, |
| 8134 | DAG.getUNDEF(NewVT), ShuffleMask); |
| 8135 | |
| 8136 | // Now return the result of comparing the shuffled vector with zero, |
| 8137 | // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. |
| 8138 | return DAG.getNode(ARMISD::VCMPZ, dl, VT, Shuffled, |
| 8139 | DAG.getConstant(ARMCC::NE, dl, MVT::i32)); |
| 8140 | } |
| 8141 | |
| 8142 | static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op, |
| 8143 | ArrayRef<int> ShuffleMask, |
| 8144 | SelectionDAG &DAG) { |
  // Attempt to lower the vector shuffle using as many whole register movs as
  // possible. This is useful for types smaller than 32 bits, which would
  // otherwise often become a series of GPR movs.
| 8148 | SDLoc dl(Op); |
| 8149 | EVT VT = Op.getValueType(); |
| 8150 | if (VT.getScalarSizeInBits() >= 32) |
| 8151 | return SDValue(); |
| 8152 | |
| 8153 | assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) && |
| 8154 | "Unexpected vector type" ); |
| 8155 | int NumElts = VT.getVectorNumElements(); |
| 8156 | int QuarterSize = NumElts / 4; |
| 8157 | // The four final parts of the vector, as i32's |
| 8158 | SDValue Parts[4]; |
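  // For example, for v16i8 each part covers four consecutive byte lanes; if a
  // part's mask entries select lanes <4,5,6,7> of one input, that part is
  // simply lane 1 of that input viewed as a v4i32.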
| 8159 | |
| 8160 | // Look for full lane vmovs like <0,1,2,3> or <u,5,6,7> etc, (but not |
| 8161 | // <u,u,u,u>), returning the vmov lane index |
| 8162 | auto getMovIdx = [](ArrayRef<int> ShuffleMask, int Start, int Length) { |
| 8163 | // Detect which mov lane this would be from the first non-undef element. |
| 8164 | int MovIdx = -1; |
| 8165 | for (int i = 0; i < Length; i++) { |
| 8166 | if (ShuffleMask[Start + i] >= 0) { |
| 8167 | if (ShuffleMask[Start + i] % Length != i) |
| 8168 | return -1; |
| 8169 | MovIdx = ShuffleMask[Start + i] / Length; |
| 8170 | break; |
| 8171 | } |
| 8172 | } |
| 8173 | // If all items are undef, leave this for other combines |
| 8174 | if (MovIdx == -1) |
| 8175 | return -1; |
| 8176 | // Check the remaining values are the correct part of the same mov |
| 8177 | for (int i = 1; i < Length; i++) { |
| 8178 | if (ShuffleMask[Start + i] >= 0 && |
| 8179 | (ShuffleMask[Start + i] / Length != MovIdx || |
| 8180 | ShuffleMask[Start + i] % Length != i)) |
| 8181 | return -1; |
| 8182 | } |
| 8183 | return MovIdx; |
| 8184 | }; |
| 8185 | |
| 8186 | for (int Part = 0; Part < 4; ++Part) { |
| 8187 | // Does this part look like a mov |
| 8188 | int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize); |
| 8189 | if (Elt != -1) { |
| 8190 | SDValue Input = Op->getOperand(0); |
| 8191 | if (Elt >= 4) { |
| 8192 | Input = Op->getOperand(1); |
| 8193 | Elt -= 4; |
| 8194 | } |
| 8195 | SDValue BitCast = DAG.getBitcast(MVT::v4i32, Input); |
| 8196 | Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, BitCast, |
| 8197 | DAG.getConstant(Elt, dl, MVT::i32)); |
| 8198 | } |
| 8199 | } |
| 8200 | |
| 8201 | // Nothing interesting found, just return |
| 8202 | if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3]) |
| 8203 | return SDValue(); |
| 8204 | |
| 8205 | // The other parts need to be built with the old shuffle vector, cast to a |
| 8206 | // v4i32 and extract_vector_elts |
| 8207 | if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) { |
| 8208 | SmallVector<int, 16> NewShuffleMask; |
| 8209 | for (int Part = 0; Part < 4; ++Part) |
| 8210 | for (int i = 0; i < QuarterSize; i++) |
| 8211 | NewShuffleMask.push_back( |
| 8212 | Parts[Part] ? -1 : ShuffleMask[Part * QuarterSize + i]); |
| 8213 | SDValue NewShuffle = DAG.getVectorShuffle( |
| 8214 | VT, dl, Op->getOperand(0), Op->getOperand(1), NewShuffleMask); |
| 8215 | SDValue BitCast = DAG.getBitcast(MVT::v4i32, NewShuffle); |
| 8216 | |
| 8217 | for (int Part = 0; Part < 4; ++Part) |
| 8218 | if (!Parts[Part]) |
| 8219 | Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, |
| 8220 | BitCast, DAG.getConstant(Part, dl, MVT::i32)); |
| 8221 | } |
| 8222 | // Build a vector out of the various parts and bitcast it back to the original |
| 8223 | // type. |
| 8224 | SDValue NewVec = DAG.getBuildVector(MVT::v4i32, dl, Parts); |
| 8225 | return DAG.getBitcast(VT, NewVec); |
| 8226 | } |
| 8227 | |
| 8228 | static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, |
| 8229 | const ARMSubtarget *ST) { |
| 8230 | SDValue V1 = Op.getOperand(0); |
| 8231 | SDValue V2 = Op.getOperand(1); |
| 8232 | SDLoc dl(Op); |
| 8233 | EVT VT = Op.getValueType(); |
| 8234 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); |
| 8235 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 8236 | |
| 8237 | if (ST->hasMVEIntegerOps() && EltSize == 1) |
| 8238 | return LowerVECTOR_SHUFFLE_i1(Op, DAG, ST); |
| 8239 | |
| 8240 | // Convert shuffles that are directly supported on NEON to target-specific |
| 8241 | // DAG nodes, instead of keeping them as shuffles and matching them again |
| 8242 | // during code selection. This is more efficient and avoids the possibility |
| 8243 | // of inconsistencies between legalization and selection. |
| 8244 | // FIXME: floating-point vectors should be canonicalized to integer vectors |
  // of the same type so that they get CSEd properly.
| 8246 | ArrayRef<int> ShuffleMask = SVN->getMask(); |
| 8247 | |
| 8248 | if (EltSize <= 32) { |
| 8249 | if (SVN->isSplat()) { |
| 8250 | int Lane = SVN->getSplatIndex(); |
| 8251 | // If this is undef splat, generate it via "just" vdup, if possible. |
| 8252 | if (Lane == -1) Lane = 0; |
| 8253 | |
| 8254 | // Test if V1 is a SCALAR_TO_VECTOR. |
| 8255 | if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { |
| 8256 | return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); |
| 8257 | } |
| 8258 | // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR |
| 8259 | // (and probably will turn into a SCALAR_TO_VECTOR once legalization |
| 8260 | // reaches it). |
| 8261 | if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && |
| 8262 | !isa<ConstantSDNode>(V1.getOperand(0))) { |
| 8263 | bool IsScalarToVector = true; |
| 8264 | for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) |
| 8265 | if (!V1.getOperand(i).isUndef()) { |
| 8266 | IsScalarToVector = false; |
| 8267 | break; |
| 8268 | } |
| 8269 | if (IsScalarToVector) |
| 8270 | return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); |
| 8271 | } |
| 8272 | return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, |
| 8273 | DAG.getConstant(Lane, dl, MVT::i32)); |
| 8274 | } |
| 8275 | |
| 8276 | bool ReverseVEXT = false; |
| 8277 | unsigned Imm = 0; |
| 8278 | if (ST->hasNEON() && isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { |
| 8279 | if (ReverseVEXT) |
| 8280 | std::swap(V1, V2); |
| 8281 | return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, |
| 8282 | DAG.getConstant(Imm, dl, MVT::i32)); |
| 8283 | } |
| 8284 | |
| 8285 | if (isVREVMask(ShuffleMask, VT, 64)) |
| 8286 | return DAG.getNode(ARMISD::VREV64, dl, VT, V1); |
| 8287 | if (isVREVMask(ShuffleMask, VT, 32)) |
| 8288 | return DAG.getNode(ARMISD::VREV32, dl, VT, V1); |
| 8289 | if (isVREVMask(ShuffleMask, VT, 16)) |
| 8290 | return DAG.getNode(ARMISD::VREV16, dl, VT, V1); |
| 8291 | |
    if (ST->hasNEON() && V2->isUndef() &&
        isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
| 8293 | return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1, |
| 8294 | DAG.getConstant(Imm, dl, MVT::i32)); |
| 8295 | } |
| 8296 | |
| 8297 | // Check for Neon shuffles that modify both input vectors in place. |
| 8298 | // If both results are used, i.e., if there are two shuffles with the same |
| 8299 | // source operands and with masks corresponding to both results of one of |
| 8300 | // these operations, DAG memoization will ensure that a single node is |
| 8301 | // used for both shuffles. |
| 8302 | unsigned WhichResult = 0; |
| 8303 | bool isV_UNDEF = false; |
| 8304 | if (ST->hasNEON()) { |
| 8305 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
| 8306 | ShuffleMask, VT, WhichResult, isV_UNDEF)) { |
| 8307 | if (isV_UNDEF) |
| 8308 | V2 = V1; |
| 8309 | return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2) |
| 8310 | .getValue(WhichResult); |
| 8311 | } |
| 8312 | } |
| 8313 | if (ST->hasMVEIntegerOps()) { |
| 8314 | if (isVMOVNMask(ShuffleMask, VT, 0)) |
| 8315 | return DAG.getNode(ARMISD::VMOVN, dl, VT, V2, V1, |
| 8316 | DAG.getConstant(0, dl, MVT::i32)); |
| 8317 | if (isVMOVNMask(ShuffleMask, VT, 1)) |
| 8318 | return DAG.getNode(ARMISD::VMOVN, dl, VT, V1, V2, |
| 8319 | DAG.getConstant(1, dl, MVT::i32)); |
| 8320 | } |
| 8321 | |
| 8322 | // Also check for these shuffles through CONCAT_VECTORS: we canonicalize |
| 8323 | // shuffles that produce a result larger than their operands with: |
| 8324 | // shuffle(concat(v1, undef), concat(v2, undef)) |
| 8325 | // -> |
| 8326 | // shuffle(concat(v1, v2), undef) |
| 8327 | // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine). |
| 8328 | // |
| 8329 | // This is useful in the general case, but there are special cases where |
| 8330 | // native shuffles produce larger results: the two-result ops. |
| 8331 | // |
| 8332 | // Look through the concat when lowering them: |
| 8333 | // shuffle(concat(v1, v2), undef) |
| 8334 | // -> |
| 8335 | // concat(VZIP(v1, v2):0, :1) |
| 8336 | // |
    if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS &&
        V2->isUndef()) {
| 8338 | SDValue SubV1 = V1->getOperand(0); |
| 8339 | SDValue SubV2 = V1->getOperand(1); |
| 8340 | EVT SubVT = SubV1.getValueType(); |
| 8341 | |
| 8342 | // We expect these to have been canonicalized to -1. |
| 8343 | assert(llvm::all_of(ShuffleMask, [&](int i) { |
| 8344 | return i < (int)VT.getVectorNumElements(); |
    }) && "Unexpected shuffle index into UNDEF operand!");
| 8346 | |
| 8347 | if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( |
| 8348 | ShuffleMask, SubVT, WhichResult, isV_UNDEF)) { |
| 8349 | if (isV_UNDEF) |
| 8350 | SubV2 = SubV1; |
| 8351 | assert((WhichResult == 0) && |
| 8352 | "In-place shuffle of concat can only have one result!" ); |
| 8353 | SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT), |
| 8354 | SubV1, SubV2); |
| 8355 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0), |
| 8356 | Res.getValue(1)); |
| 8357 | } |
| 8358 | } |
| 8359 | } |
| 8360 | |
| 8361 | // If the shuffle is not directly supported and it has 4 elements, use |
| 8362 | // the PerfectShuffle-generated table to synthesize it from other shuffles. |
| 8363 | unsigned NumElts = VT.getVectorNumElements(); |
| 8364 | if (NumElts == 4) { |
| 8365 | unsigned PFIndexes[4]; |
| 8366 | for (unsigned i = 0; i != 4; ++i) { |
| 8367 | if (ShuffleMask[i] < 0) |
| 8368 | PFIndexes[i] = 8; |
| 8369 | else |
| 8370 | PFIndexes[i] = ShuffleMask[i]; |
| 8371 | } |
| 8372 | |
| 8373 | // Compute the index in the perfect shuffle table. |
| 8374 | unsigned PFTableIndex = |
| 8375 | PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; |
| 8376 | unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; |
| 8377 | unsigned Cost = (PFEntry >> 30); |
| 8378 | |
| 8379 | if (Cost <= 4) { |
| 8380 | if (ST->hasNEON()) |
| 8381 | return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); |
| 8382 | else if (isLegalMVEShuffleOp(PFEntry)) { |
| 8383 | unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); |
| 8384 | unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); |
| 8385 | unsigned PFEntryLHS = PerfectShuffleTable[LHSID]; |
| 8386 | unsigned PFEntryRHS = PerfectShuffleTable[RHSID]; |
| 8387 | if (isLegalMVEShuffleOp(PFEntryLHS) && isLegalMVEShuffleOp(PFEntryRHS)) |
| 8388 | return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); |
| 8389 | } |
| 8390 | } |
| 8391 | } |
| 8392 | |
| 8393 | // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. |
| 8394 | if (EltSize >= 32) { |
| 8395 | // Do the expansion with floating-point types, since that is what the VFP |
| 8396 | // registers are defined to use, and since i64 is not legal. |
| 8397 | EVT EltVT = EVT::getFloatingPointVT(EltSize); |
| 8398 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); |
| 8399 | V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); |
| 8400 | V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); |
| 8401 | SmallVector<SDValue, 8> Ops; |
| 8402 | for (unsigned i = 0; i < NumElts; ++i) { |
| 8403 | if (ShuffleMask[i] < 0) |
| 8404 | Ops.push_back(DAG.getUNDEF(EltVT)); |
| 8405 | else |
| 8406 | Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, |
| 8407 | ShuffleMask[i] < (int)NumElts ? V1 : V2, |
| 8408 | DAG.getConstant(ShuffleMask[i] & (NumElts-1), |
| 8409 | dl, MVT::i32))); |
| 8410 | } |
| 8411 | SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); |
| 8412 | return DAG.getNode(ISD::BITCAST, dl, VT, Val); |
| 8413 | } |
| 8414 | |
  if (ST->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) &&
      isReverseMask(ShuffleMask, VT))
| 8416 | return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG); |
| 8417 | |
| 8418 | if (ST->hasNEON() && VT == MVT::v8i8) |
| 8419 | if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG)) |
| 8420 | return NewOp; |
| 8421 | |
| 8422 | if (ST->hasMVEIntegerOps()) |
| 8423 | if (SDValue NewOp = LowerVECTOR_SHUFFLEUsingMovs(Op, ShuffleMask, DAG)) |
| 8424 | return NewOp; |
| 8425 | |
| 8426 | return SDValue(); |
| 8427 | } |
| 8428 | |
| 8429 | static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, |
| 8430 | const ARMSubtarget *ST) { |
| 8431 | EVT VecVT = Op.getOperand(0).getValueType(); |
| 8432 | SDLoc dl(Op); |
| 8433 | |
| 8434 | assert(ST->hasMVEIntegerOps() && |
| 8435 | "LowerINSERT_VECTOR_ELT_i1 called without MVE!" ); |
| 8436 | |
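  // Each lane of an MVE predicate occupies LaneWidth bits (4 for v4i1, 2 for
  // v8i1, 1 for v16i1) of the 16-bit predicate register. Inserting an element
  // is therefore a bitfield insert (BFI) of the sign-extended i1 into that
  // lane's field of the predicate viewed as an i32.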
| 8437 | SDValue Conv = |
| 8438 | DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0)); |
| 8439 | unsigned Lane = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); |
| 8440 | unsigned LaneWidth = |
| 8441 | getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8; |
| 8442 | unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth; |
| 8443 | SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, |
| 8444 | Op.getOperand(1), DAG.getValueType(MVT::i1)); |
| 8445 | SDValue BFI = DAG.getNode(ARMISD::BFI, dl, MVT::i32, Conv, Ext, |
| 8446 | DAG.getConstant(~Mask, dl, MVT::i32)); |
| 8447 | return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), BFI); |
| 8448 | } |
| 8449 | |
| 8450 | SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, |
| 8451 | SelectionDAG &DAG) const { |
| 8452 | // INSERT_VECTOR_ELT is legal only for immediate indexes. |
| 8453 | SDValue Lane = Op.getOperand(2); |
| 8454 | if (!isa<ConstantSDNode>(Lane)) |
| 8455 | return SDValue(); |
| 8456 | |
| 8457 | SDValue Elt = Op.getOperand(1); |
| 8458 | EVT EltVT = Elt.getValueType(); |
| 8459 | |
| 8460 | if (Subtarget->hasMVEIntegerOps() && |
| 8461 | Op.getValueType().getScalarSizeInBits() == 1) |
| 8462 | return LowerINSERT_VECTOR_ELT_i1(Op, DAG, Subtarget); |
| 8463 | |
| 8464 | if (getTypeAction(*DAG.getContext(), EltVT) == |
| 8465 | TargetLowering::TypePromoteFloat) { |
| 8466 | // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32, |
| 8467 | // but the type system will try to do that if we don't intervene. |
| 8468 | // Reinterpret any such vector-element insertion as one with the |
| 8469 | // corresponding integer types. |
| 8470 | |
| 8471 | SDLoc dl(Op); |
| 8472 | |
| 8473 | EVT IEltVT = MVT::getIntegerVT(EltVT.getScalarSizeInBits()); |
| 8474 | assert(getTypeAction(*DAG.getContext(), IEltVT) != |
| 8475 | TargetLowering::TypePromoteFloat); |
| 8476 | |
| 8477 | SDValue VecIn = Op.getOperand(0); |
| 8478 | EVT VecVT = VecIn.getValueType(); |
| 8479 | EVT IVecVT = EVT::getVectorVT(*DAG.getContext(), IEltVT, |
| 8480 | VecVT.getVectorNumElements()); |
| 8481 | |
| 8482 | SDValue IElt = DAG.getNode(ISD::BITCAST, dl, IEltVT, Elt); |
| 8483 | SDValue IVecIn = DAG.getNode(ISD::BITCAST, dl, IVecVT, VecIn); |
| 8484 | SDValue IVecOut = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVecVT, |
| 8485 | IVecIn, IElt, Lane); |
| 8486 | return DAG.getNode(ISD::BITCAST, dl, VecVT, IVecOut); |
| 8487 | } |
| 8488 | |
| 8489 | return Op; |
| 8490 | } |
| 8491 | |
static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
                                          const ARMSubtarget *ST) {
| 8494 | EVT VecVT = Op.getOperand(0).getValueType(); |
| 8495 | SDLoc dl(Op); |
| 8496 | |
  assert(ST->hasMVEIntegerOps() &&
         "LowerEXTRACT_VECTOR_ELT_i1 called without MVE!");
| 8499 | |
| 8500 | SDValue Conv = |
| 8501 | DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0)); |
| 8502 | unsigned Lane = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); |
| 8503 | unsigned LaneWidth = |
| 8504 | getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8; |
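  // Illustrative example: extracting lane 3 of a v8i1 predicate gives
  // LaneWidth == 2, so the 16-bit predicate is shifted right by 6 and the
  // requested lane lands in the low bit of the result.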
| 8505 | SDValue Shift = DAG.getNode(ISD::SRL, dl, MVT::i32, Conv, |
| 8506 | DAG.getConstant(Lane * LaneWidth, dl, MVT::i32)); |
| 8507 | return Shift; |
| 8508 | } |
| 8509 | |
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG,
                                       const ARMSubtarget *ST) {
| 8512 | // EXTRACT_VECTOR_ELT is legal only for immediate indexes. |
| 8513 | SDValue Lane = Op.getOperand(1); |
| 8514 | if (!isa<ConstantSDNode>(Lane)) |
| 8515 | return SDValue(); |
| 8516 | |
| 8517 | SDValue Vec = Op.getOperand(0); |
| 8518 | EVT VT = Vec.getValueType(); |
| 8519 | |
| 8520 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
| 8521 | return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST); |
| 8522 | |
| 8523 | if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) { |
| 8524 | SDLoc dl(Op); |
| 8525 | return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); |
| 8526 | } |
| 8527 | |
| 8528 | return Op; |
| 8529 | } |
| 8530 | |
| 8531 | static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG, |
| 8532 | const ARMSubtarget *ST) { |
| 8533 | SDValue V1 = Op.getOperand(0); |
| 8534 | SDValue V2 = Op.getOperand(1); |
| 8535 | SDLoc dl(Op); |
| 8536 | EVT VT = Op.getValueType(); |
| 8537 | EVT Op1VT = V1.getValueType(); |
| 8538 | EVT Op2VT = V2.getValueType(); |
| 8539 | unsigned NumElts = VT.getVectorNumElements(); |
| 8540 | |
  assert(Op1VT == Op2VT && "Operand types don't match!");
  assert(VT.getScalarSizeInBits() == 1 &&
         "Unexpected custom CONCAT_VECTORS lowering");
  assert(ST->hasMVEIntegerOps() &&
         "CONCAT_VECTORS lowering only supported for MVE");
| 8546 | |
| 8547 | SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG); |
| 8548 | SDValue NewV2 = PromoteMVEPredVector(dl, V2, Op2VT, DAG); |
| 8549 | |
| 8550 | // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets |
| 8551 | // promoted to v8i16, etc. |
| 8552 | |
| 8553 | MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); |
| 8554 | |
| 8555 | // Extract the vector elements from Op1 and Op2 one by one and truncate them |
| 8556 | // to be the right size for the destination. For example, if Op1 is v4i1 then |
  // the promoted vector is v4i32. The result of concatenation gives a v8i1,
| 8558 | // which when promoted is v8i16. That means each i32 element from Op1 needs |
| 8559 | // truncating to i16 and inserting in the result. |
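  // Illustrative walk-through: concatenating two v4i1 operands, NewV1 and
  // NewV2 are both v4i32; their eight lanes are extracted as i32 values,
  // inserted into a v8i16 ConVec at indices 0-7, and the VCMPZ at the end
  // turns that v8i16 back into the v8i1 result.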
| 8560 | EVT ConcatVT = MVT::getVectorVT(ElType, NumElts); |
| 8561 | SDValue ConVec = DAG.getNode(ISD::UNDEF, dl, ConcatVT); |
  auto ExtractInto = [&DAG, &dl](SDValue NewV, SDValue ConVec, unsigned &j) {
| 8563 | EVT NewVT = NewV.getValueType(); |
| 8564 | EVT ConcatVT = ConVec.getValueType(); |
| 8565 | for (unsigned i = 0, e = NewVT.getVectorNumElements(); i < e; i++, j++) { |
| 8566 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV, |
| 8567 | DAG.getIntPtrConstant(i, dl)); |
| 8568 | ConVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ConcatVT, ConVec, Elt, |
| 8569 | DAG.getConstant(j, dl, MVT::i32)); |
| 8570 | } |
| 8571 | return ConVec; |
| 8572 | }; |
| 8573 | unsigned j = 0; |
  ConVec = ExtractInto(NewV1, ConVec, j);
  ConVec = ExtractInto(NewV2, ConVec, j);
| 8576 | |
| 8577 | // Now return the result of comparing the subvector with zero, |
| 8578 | // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. |
| 8579 | return DAG.getNode(ARMISD::VCMPZ, dl, VT, ConVec, |
| 8580 | DAG.getConstant(ARMCC::NE, dl, MVT::i32)); |
| 8581 | } |
| 8582 | |
| 8583 | static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, |
| 8584 | const ARMSubtarget *ST) { |
| 8585 | EVT VT = Op->getValueType(0); |
| 8586 | if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1) |
| 8587 | return LowerCONCAT_VECTORS_i1(Op, DAG, ST); |
| 8588 | |
| 8589 | // The only time a CONCAT_VECTORS operation can have legal types is when |
| 8590 | // two 64-bit vectors are concatenated to a 128-bit vector. |
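  // Illustrative example: (concat_vectors v2f32:a, v2f32:b) is built by
  // bitcasting each half to f64, inserting the halves into a v2f64, and
  // bitcasting that to the v4f32 result.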
| 8591 | assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && |
| 8592 | "unexpected CONCAT_VECTORS" ); |
| 8593 | SDLoc dl(Op); |
| 8594 | SDValue Val = DAG.getUNDEF(MVT::v2f64); |
| 8595 | SDValue Op0 = Op.getOperand(0); |
| 8596 | SDValue Op1 = Op.getOperand(1); |
| 8597 | if (!Op0.isUndef()) |
| 8598 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, |
| 8599 | DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), |
| 8600 | DAG.getIntPtrConstant(0, dl)); |
| 8601 | if (!Op1.isUndef()) |
| 8602 | Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, |
| 8603 | DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), |
| 8604 | DAG.getIntPtrConstant(1, dl)); |
| 8605 | return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); |
| 8606 | } |
| 8607 | |
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
                                      const ARMSubtarget *ST) {
| 8610 | SDValue V1 = Op.getOperand(0); |
| 8611 | SDValue V2 = Op.getOperand(1); |
| 8612 | SDLoc dl(Op); |
| 8613 | EVT VT = Op.getValueType(); |
| 8614 | EVT Op1VT = V1.getValueType(); |
| 8615 | unsigned NumElts = VT.getVectorNumElements(); |
| 8616 | unsigned Index = cast<ConstantSDNode>(V2)->getZExtValue(); |
| 8617 | |
  assert(VT.getScalarSizeInBits() == 1 &&
         "Unexpected custom EXTRACT_SUBVECTOR lowering");
  assert(ST->hasMVEIntegerOps() &&
         "EXTRACT_SUBVECTOR lowering only supported for MVE");
| 8622 | |
| 8623 | SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG); |
| 8624 | |
| 8625 | // We now have Op1 promoted to a vector of integers, where v8i1 gets |
| 8626 | // promoted to v8i16, etc. |
| 8627 | |
| 8628 | MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT(); |
| 8629 | |
| 8630 | EVT SubVT = MVT::getVectorVT(ElType, NumElts); |
| 8631 | SDValue SubVec = DAG.getNode(ISD::UNDEF, dl, SubVT); |
| 8632 | for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) { |
| 8633 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV1, |
| 8634 | DAG.getIntPtrConstant(i, dl)); |
| 8635 | SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt, |
| 8636 | DAG.getConstant(j, dl, MVT::i32)); |
| 8637 | } |
| 8638 | |
| 8639 | // Now return the result of comparing the subvector with zero, |
| 8640 | // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1. |
| 8641 | return DAG.getNode(ARMISD::VCMPZ, dl, VT, SubVec, |
| 8642 | DAG.getConstant(ARMCC::NE, dl, MVT::i32)); |
| 8643 | } |
| 8644 | |
// Turn a truncate to a predicate (an i1 vector) into icmp(and(x, 1), 0).
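// Illustrative example: (v4i1 (trunc v4i32:x)) becomes
// setcc(and(x, 1), 0, setne), testing the low bit of each lane.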
| 8646 | static SDValue LowerTruncatei1(SDValue N, SelectionDAG &DAG, |
| 8647 | const ARMSubtarget *ST) { |
| 8648 | assert(ST->hasMVEIntegerOps() && "Expected MVE!" ); |
| 8649 | EVT VT = N.getValueType(); |
| 8650 | assert((VT == MVT::v16i1 || VT == MVT::v8i1 || VT == MVT::v4i1) && |
| 8651 | "Expected a vector i1 type!" ); |
| 8652 | SDValue Op = N.getOperand(0); |
| 8653 | EVT FromVT = Op.getValueType(); |
| 8654 | SDLoc DL(N); |
| 8655 | |
| 8656 | SDValue And = |
| 8657 | DAG.getNode(ISD::AND, DL, FromVT, Op, DAG.getConstant(1, DL, FromVT)); |
| 8658 | return DAG.getNode(ISD::SETCC, DL, VT, And, DAG.getConstant(0, DL, FromVT), |
| 8659 | DAG.getCondCode(ISD::SETNE)); |
| 8660 | } |
| 8661 | |
| 8662 | /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each |
| 8663 | /// element has been zero/sign-extended, depending on the isSigned parameter, |
| 8664 | /// from an integer type half its size. |
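/// Illustrative example: a constant v4i16 BUILD_VECTOR whose elements all
/// fit in 8 bits, such as <1, 2, 3, 4>, counts as extended from v4i8.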
| 8665 | static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, |
| 8666 | bool isSigned) { |
| 8667 | // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. |
| 8668 | EVT VT = N->getValueType(0); |
| 8669 | if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { |
| 8670 | SDNode *BVN = N->getOperand(0).getNode(); |
| 8671 | if (BVN->getValueType(0) != MVT::v4i32 || |
| 8672 | BVN->getOpcode() != ISD::BUILD_VECTOR) |
| 8673 | return false; |
| 8674 | unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
| 8675 | unsigned HiElt = 1 - LoElt; |
| 8676 | ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); |
| 8677 | ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); |
| 8678 | ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); |
| 8679 | ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); |
| 8680 | if (!Lo0 || !Hi0 || !Lo1 || !Hi1) |
| 8681 | return false; |
| 8682 | if (isSigned) { |
| 8683 | if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && |
| 8684 | Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) |
| 8685 | return true; |
| 8686 | } else { |
| 8687 | if (Hi0->isNullValue() && Hi1->isNullValue()) |
| 8688 | return true; |
| 8689 | } |
| 8690 | return false; |
| 8691 | } |
| 8692 | |
| 8693 | if (N->getOpcode() != ISD::BUILD_VECTOR) |
| 8694 | return false; |
| 8695 | |
| 8696 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
| 8697 | SDNode *Elt = N->getOperand(i).getNode(); |
| 8698 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { |
| 8699 | unsigned EltSize = VT.getScalarSizeInBits(); |
| 8700 | unsigned HalfSize = EltSize / 2; |
| 8701 | if (isSigned) { |
| 8702 | if (!isIntN(HalfSize, C->getSExtValue())) |
| 8703 | return false; |
| 8704 | } else { |
| 8705 | if (!isUIntN(HalfSize, C->getZExtValue())) |
| 8706 | return false; |
| 8707 | } |
| 8708 | continue; |
| 8709 | } |
| 8710 | return false; |
| 8711 | } |
| 8712 | |
| 8713 | return true; |
| 8714 | } |
| 8715 | |
| 8716 | /// isSignExtended - Check if a node is a vector value that is sign-extended |
| 8717 | /// or a constant BUILD_VECTOR with sign-extended elements. |
| 8718 | static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { |
| 8719 | if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) |
| 8720 | return true; |
| 8721 | if (isExtendedBUILD_VECTOR(N, DAG, true)) |
| 8722 | return true; |
| 8723 | return false; |
| 8724 | } |
| 8725 | |
| 8726 | /// isZeroExtended - Check if a node is a vector value that is zero-extended (or |
| 8727 | /// any-extended) or a constant BUILD_VECTOR with zero-extended elements. |
| 8728 | static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { |
| 8729 | if (N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND || |
| 8730 | ISD::isZEXTLoad(N)) |
| 8731 | return true; |
| 8732 | if (isExtendedBUILD_VECTOR(N, DAG, false)) |
| 8733 | return true; |
| 8734 | return false; |
| 8735 | } |
| 8736 | |
| 8737 | static EVT getExtensionTo64Bits(const EVT &OrigVT) { |
| 8738 | if (OrigVT.getSizeInBits() >= 64) |
| 8739 | return OrigVT; |
| 8740 | |
  assert(OrigVT.isSimple() && "Expecting a simple value type");
| 8742 | |
| 8743 | MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; |
| 8744 | switch (OrigSimpleTy) { |
| 8745 | default: llvm_unreachable("Unexpected Vector Type" ); |
| 8746 | case MVT::v2i8: |
| 8747 | case MVT::v2i16: |
| 8748 | return MVT::v2i32; |
| 8749 | case MVT::v4i8: |
| 8750 | return MVT::v4i16; |
| 8751 | } |
| 8752 | } |
| 8753 | |
| 8754 | /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total |
| 8755 | /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL. |
| 8756 | /// We insert the required extension here to get the vector to fill a D register. |
| 8757 | static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, |
| 8758 | const EVT &OrigTy, |
| 8759 | const EVT &ExtTy, |
| 8760 | unsigned ExtOpcode) { |
| 8761 | // The vector originally had a size of OrigTy. It was then extended to ExtTy. |
| 8762 | // We expect the ExtTy to be 128-bits total. If the OrigTy is less than |
| 8763 | // 64-bits we need to insert a new extension so that it will be 64-bits. |
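  // Illustrative example: a v2i8 operand that was extended to v2i64 is
  // re-extended here to v2i32, so that it fills a full 64-bit D register.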
  assert(ExtTy.is128BitVector() && "Unexpected extension size");
| 8765 | if (OrigTy.getSizeInBits() >= 64) |
| 8766 | return N; |
| 8767 | |
| 8768 | // Must extend size to at least 64 bits to be used as an operand for VMULL. |
| 8769 | EVT NewVT = getExtensionTo64Bits(OrigTy); |
| 8770 | |
| 8771 | return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N); |
| 8772 | } |
| 8773 | |
| 8774 | /// SkipLoadExtensionForVMULL - return a load of the original vector size that |
| 8775 | /// does not do any sign/zero extension. If the original vector is less |
| 8776 | /// than 64 bits, an appropriate extension will be added after the load to |
| 8777 | /// reach a total size of 64 bits. We have to add the extension separately |
| 8778 | /// because ARM does not have a sign/zero extending load for vectors. |
| 8779 | static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { |
| 8780 | EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT()); |
| 8781 | |
| 8782 | // The load already has the right type. |
| 8783 | if (ExtendedTy == LD->getMemoryVT()) |
| 8784 | return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(), |
| 8785 | LD->getBasePtr(), LD->getPointerInfo(), |
| 8786 | LD->getAlignment(), LD->getMemOperand()->getFlags()); |
| 8787 | |
| 8788 | // We need to create a zextload/sextload. We cannot just create a load |
  // followed by a zext/sext node because LowerMUL is also run during normal
| 8790 | // operation legalization where we can't create illegal types. |
| 8791 | return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy, |
| 8792 | LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(), |
| 8793 | LD->getMemoryVT(), LD->getAlignment(), |
| 8794 | LD->getMemOperand()->getFlags()); |
| 8795 | } |
| 8796 | |
| 8797 | /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, |
| 8798 | /// ANY_EXTEND, extending load, or BUILD_VECTOR with extended elements, return |
| 8799 | /// the unextended value. The unextended vector should be 64 bits so that it can |
| 8800 | /// be used as an operand to a VMULL instruction. If the original vector size |
/// before extension is less than 64 bits we add an extension to resize
| 8802 | /// the vector to 64 bits. |
| 8803 | static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { |
| 8804 | if (N->getOpcode() == ISD::SIGN_EXTEND || |
| 8805 | N->getOpcode() == ISD::ZERO_EXTEND || N->getOpcode() == ISD::ANY_EXTEND) |
| 8806 | return AddRequiredExtensionForVMULL(N->getOperand(0), DAG, |
| 8807 | N->getOperand(0)->getValueType(0), |
| 8808 | N->getValueType(0), |
| 8809 | N->getOpcode()); |
| 8810 | |
| 8811 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { |
| 8812 | assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) && |
| 8813 | "Expected extending load" ); |
| 8814 | |
| 8815 | SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG); |
| 8816 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1)); |
| 8817 | unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
| 8818 | SDValue extLoad = |
| 8819 | DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad); |
| 8820 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad); |
| 8821 | |
| 8822 | return newLoad; |
| 8823 | } |
| 8824 | |
| 8825 | // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will |
| 8826 | // have been legalized as a BITCAST from v4i32. |
| 8827 | if (N->getOpcode() == ISD::BITCAST) { |
| 8828 | SDNode *BVN = N->getOperand(0).getNode(); |
| 8829 | assert(BVN->getOpcode() == ISD::BUILD_VECTOR && |
           BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
| 8831 | unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; |
| 8832 | return DAG.getBuildVector( |
| 8833 | MVT::v2i32, SDLoc(N), |
| 8834 | {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)}); |
| 8835 | } |
| 8836 | // Construct a new BUILD_VECTOR with elements truncated to half the size. |
  assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
| 8838 | EVT VT = N->getValueType(0); |
| 8839 | unsigned EltSize = VT.getScalarSizeInBits() / 2; |
| 8840 | unsigned NumElts = VT.getVectorNumElements(); |
| 8841 | MVT TruncVT = MVT::getIntegerVT(EltSize); |
| 8842 | SmallVector<SDValue, 8> Ops; |
| 8843 | SDLoc dl(N); |
| 8844 | for (unsigned i = 0; i != NumElts; ++i) { |
| 8845 | ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); |
| 8846 | const APInt &CInt = C->getAPIntValue(); |
| 8847 | // Element types smaller than 32 bits are not legal, so use i32 elements. |
| 8848 | // The values are implicitly truncated so sext vs. zext doesn't matter. |
| 8849 | Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32)); |
| 8850 | } |
| 8851 | return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops); |
| 8852 | } |
| 8853 | |
| 8854 | static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { |
| 8855 | unsigned Opcode = N->getOpcode(); |
| 8856 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
| 8857 | SDNode *N0 = N->getOperand(0).getNode(); |
| 8858 | SDNode *N1 = N->getOperand(1).getNode(); |
| 8859 | return N0->hasOneUse() && N1->hasOneUse() && |
| 8860 | isSignExtended(N0, DAG) && isSignExtended(N1, DAG); |
| 8861 | } |
| 8862 | return false; |
| 8863 | } |
| 8864 | |
| 8865 | static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { |
| 8866 | unsigned Opcode = N->getOpcode(); |
| 8867 | if (Opcode == ISD::ADD || Opcode == ISD::SUB) { |
| 8868 | SDNode *N0 = N->getOperand(0).getNode(); |
| 8869 | SDNode *N1 = N->getOperand(1).getNode(); |
| 8870 | return N0->hasOneUse() && N1->hasOneUse() && |
| 8871 | isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); |
| 8872 | } |
| 8873 | return false; |
| 8874 | } |
| 8875 | |
| 8876 | static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { |
| 8877 | // Multiplications are only custom-lowered for 128-bit vectors so that |
| 8878 | // VMULL can be detected. Otherwise v2i64 multiplications are not legal. |
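  // Illustrative example: (mul (sext v8i8:a to v8i16), (sext v8i8:b to
  // v8i16)) is matched below as VMULLs(a, b), a single vmull.s8 producing
  // the v8i16 result directly.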
| 8879 | EVT VT = Op.getValueType(); |
| 8880 | assert(VT.is128BitVector() && VT.isInteger() && |
| 8881 | "unexpected type for custom-lowering ISD::MUL" ); |
| 8882 | SDNode *N0 = Op.getOperand(0).getNode(); |
| 8883 | SDNode *N1 = Op.getOperand(1).getNode(); |
| 8884 | unsigned NewOpc = 0; |
| 8885 | bool isMLA = false; |
| 8886 | bool isN0SExt = isSignExtended(N0, DAG); |
| 8887 | bool isN1SExt = isSignExtended(N1, DAG); |
| 8888 | if (isN0SExt && isN1SExt) |
| 8889 | NewOpc = ARMISD::VMULLs; |
| 8890 | else { |
| 8891 | bool isN0ZExt = isZeroExtended(N0, DAG); |
| 8892 | bool isN1ZExt = isZeroExtended(N1, DAG); |
| 8893 | if (isN0ZExt && isN1ZExt) |
| 8894 | NewOpc = ARMISD::VMULLu; |
| 8895 | else if (isN1SExt || isN1ZExt) { |
| 8896 | // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these |
| 8897 | // into (s/zext A * s/zext C) + (s/zext B * s/zext C) |
| 8898 | if (isN1SExt && isAddSubSExt(N0, DAG)) { |
| 8899 | NewOpc = ARMISD::VMULLs; |
| 8900 | isMLA = true; |
| 8901 | } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { |
| 8902 | NewOpc = ARMISD::VMULLu; |
| 8903 | isMLA = true; |
| 8904 | } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { |
| 8905 | std::swap(N0, N1); |
| 8906 | NewOpc = ARMISD::VMULLu; |
| 8907 | isMLA = true; |
| 8908 | } |
| 8909 | } |
| 8910 | |
| 8911 | if (!NewOpc) { |
| 8912 | if (VT == MVT::v2i64) |
| 8913 | // Fall through to expand this. It is not legal. |
| 8914 | return SDValue(); |
| 8915 | else |
| 8916 | // Other vector multiplications are legal. |
| 8917 | return Op; |
| 8918 | } |
| 8919 | } |
| 8920 | |
| 8921 | // Legalize to a VMULL instruction. |
| 8922 | SDLoc DL(Op); |
| 8923 | SDValue Op0; |
| 8924 | SDValue Op1 = SkipExtensionForVMULL(N1, DAG); |
| 8925 | if (!isMLA) { |
| 8926 | Op0 = SkipExtensionForVMULL(N0, DAG); |
| 8927 | assert(Op0.getValueType().is64BitVector() && |
| 8928 | Op1.getValueType().is64BitVector() && |
| 8929 | "unexpected types for extended operands to VMULL" ); |
| 8930 | return DAG.getNode(NewOpc, DL, VT, Op0, Op1); |
| 8931 | } |
| 8932 | |
  // Optimizing (zext A + zext B) * C to (VMULL A, C) + (VMULL B, C) during
  // isel lowering to take advantage of no-stall back-to-back vmul + vmla.
| 8935 | // vmull q0, d4, d6 |
| 8936 | // vmlal q0, d5, d6 |
| 8937 | // is faster than |
| 8938 | // vaddl q0, d4, d5 |
| 8939 | // vmovl q1, d6 |
| 8940 | // vmul q0, q0, q1 |
| 8941 | SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG); |
| 8942 | SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG); |
| 8943 | EVT Op1VT = Op1.getValueType(); |
| 8944 | return DAG.getNode(N0->getOpcode(), DL, VT, |
| 8945 | DAG.getNode(NewOpc, DL, VT, |
| 8946 | DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), |
| 8947 | DAG.getNode(NewOpc, DL, VT, |
| 8948 | DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); |
| 8949 | } |
| 8950 | |
| 8951 | static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, |
| 8952 | SelectionDAG &DAG) { |
| 8953 | // TODO: Should this propagate fast-math-flags? |
| 8954 | |
| 8955 | // Convert to float |
| 8956 | // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); |
| 8957 | // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); |
| 8958 | X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); |
| 8959 | Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); |
| 8960 | X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); |
| 8961 | Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); |
| 8962 | // Get reciprocal estimate. |
| 8963 | // float4 recip = vrecpeq_f32(yf); |
| 8964 | Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
| 8965 | DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), |
| 8966 | Y); |
| 8967 | // Because char has a smaller range than uchar, we can actually get away |
| 8968 | // without any newton steps. This requires that we use a weird bias |
| 8969 | // of 0xb000, however (again, this has been exhaustively tested). |
| 8970 | // float4 result = as_float4(as_int4(xf*recip) + 0xb000); |
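  // (Roughly speaking, adding 0xb000 to the raw float bits nudges the
  // product up by a fixed number of ulps, compensating for the low
  // reciprocal estimate so the truncation below never undershoots.)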
| 8971 | X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); |
| 8972 | X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); |
| 8973 | Y = DAG.getConstant(0xb000, dl, MVT::v4i32); |
| 8974 | X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); |
| 8975 | X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); |
| 8976 | // Convert back to short. |
| 8977 | X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); |
| 8978 | X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); |
| 8979 | return X; |
| 8980 | } |
| 8981 | |
| 8982 | static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, |
| 8983 | SelectionDAG &DAG) { |
| 8984 | // TODO: Should this propagate fast-math-flags? |
| 8985 | |
| 8986 | SDValue N2; |
| 8987 | // Convert to float. |
| 8988 | // float4 yf = vcvt_f32_s32(vmovl_s16(y)); |
| 8989 | // float4 xf = vcvt_f32_s32(vmovl_s16(x)); |
| 8990 | N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); |
| 8991 | N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); |
| 8992 | N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); |
| 8993 | N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); |
| 8994 | |
| 8995 | // Use reciprocal estimate and one refinement step. |
| 8996 | // float4 recip = vrecpeq_f32(yf); |
| 8997 | // recip *= vrecpsq_f32(yf, recip); |
| 8998 | N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
| 8999 | DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), |
| 9000 | N1); |
| 9001 | N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
| 9002 | DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), |
| 9003 | N1, N2); |
| 9004 | N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); |
| 9005 | // Because short has a smaller range than ushort, we can actually get away |
| 9006 | // with only a single newton step. This requires that we use a weird bias |
  // of 0x89, however (again, this has been exhaustively tested).
| 9008 | // float4 result = as_float4(as_int4(xf*recip) + 0x89); |
| 9009 | N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); |
| 9010 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); |
| 9011 | N1 = DAG.getConstant(0x89, dl, MVT::v4i32); |
| 9012 | N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); |
| 9013 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); |
| 9014 | // Convert back to integer and return. |
| 9015 | // return vmovn_s32(vcvt_s32_f32(result)); |
| 9016 | N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); |
| 9017 | N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); |
| 9018 | return N0; |
| 9019 | } |
| 9020 | |
| 9021 | static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG, |
| 9022 | const ARMSubtarget *ST) { |
| 9023 | EVT VT = Op.getValueType(); |
| 9024 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && |
| 9025 | "unexpected type for custom-lowering ISD::SDIV" ); |
| 9026 | |
| 9027 | SDLoc dl(Op); |
| 9028 | SDValue N0 = Op.getOperand(0); |
| 9029 | SDValue N1 = Op.getOperand(1); |
| 9030 | SDValue N2, N3; |
| 9031 | |
| 9032 | if (VT == MVT::v8i8) { |
| 9033 | N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); |
| 9034 | N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); |
| 9035 | |
| 9036 | N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, |
| 9037 | DAG.getIntPtrConstant(4, dl)); |
| 9038 | N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, |
| 9039 | DAG.getIntPtrConstant(4, dl)); |
| 9040 | N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, |
| 9041 | DAG.getIntPtrConstant(0, dl)); |
| 9042 | N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, |
| 9043 | DAG.getIntPtrConstant(0, dl)); |
| 9044 | |
| 9045 | N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 |
| 9046 | N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 |
| 9047 | |
| 9048 | N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); |
| 9049 | N0 = LowerCONCAT_VECTORS(N0, DAG, ST); |
| 9050 | |
| 9051 | N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); |
| 9052 | return N0; |
| 9053 | } |
| 9054 | return LowerSDIV_v4i16(N0, N1, dl, DAG); |
| 9055 | } |
| 9056 | |
| 9057 | static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG, |
| 9058 | const ARMSubtarget *ST) { |
| 9059 | // TODO: Should this propagate fast-math-flags? |
| 9060 | EVT VT = Op.getValueType(); |
| 9061 | assert((VT == MVT::v4i16 || VT == MVT::v8i8) && |
| 9062 | "unexpected type for custom-lowering ISD::UDIV" ); |
| 9063 | |
| 9064 | SDLoc dl(Op); |
| 9065 | SDValue N0 = Op.getOperand(0); |
| 9066 | SDValue N1 = Op.getOperand(1); |
| 9067 | SDValue N2, N3; |
| 9068 | |
| 9069 | if (VT == MVT::v8i8) { |
| 9070 | N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); |
| 9071 | N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); |
| 9072 | |
| 9073 | N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, |
| 9074 | DAG.getIntPtrConstant(4, dl)); |
| 9075 | N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, |
| 9076 | DAG.getIntPtrConstant(4, dl)); |
| 9077 | N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, |
| 9078 | DAG.getIntPtrConstant(0, dl)); |
| 9079 | N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, |
| 9080 | DAG.getIntPtrConstant(0, dl)); |
| 9081 | |
| 9082 | N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 |
| 9083 | N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 |
| 9084 | |
| 9085 | N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); |
| 9086 | N0 = LowerCONCAT_VECTORS(N0, DAG, ST); |
| 9087 | |
| 9088 | N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, |
| 9089 | DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl, |
| 9090 | MVT::i32), |
| 9091 | N0); |
| 9092 | return N0; |
| 9093 | } |
| 9094 | |
  // v4i16 udiv ... Convert to float.
| 9096 | // float4 yf = vcvt_f32_s32(vmovl_u16(y)); |
| 9097 | // float4 xf = vcvt_f32_s32(vmovl_u16(x)); |
| 9098 | N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); |
| 9099 | N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); |
| 9100 | N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); |
| 9101 | SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); |
| 9102 | |
| 9103 | // Use reciprocal estimate and two refinement steps. |
| 9104 | // float4 recip = vrecpeq_f32(yf); |
| 9105 | // recip *= vrecpsq_f32(yf, recip); |
| 9106 | // recip *= vrecpsq_f32(yf, recip); |
| 9107 | N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
| 9108 | DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), |
| 9109 | BN1); |
| 9110 | N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
| 9111 | DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), |
| 9112 | BN1, N2); |
| 9113 | N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); |
| 9114 | N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, |
| 9115 | DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), |
| 9116 | BN1, N2); |
| 9117 | N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); |
| 9118 | // Simply multiplying by the reciprocal estimate can leave us a few ulps |
| 9119 | // too low, so we add 2 ulps (exhaustive testing shows that this is enough, |
| 9120 | // and that it will never cause us to return an answer too large). |
| 9121 | // float4 result = as_float4(as_int4(xf*recip) + 2); |
| 9122 | N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); |
| 9123 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); |
| 9124 | N1 = DAG.getConstant(2, dl, MVT::v4i32); |
| 9125 | N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); |
| 9126 | N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); |
| 9127 | // Convert back to integer and return. |
| 9128 | // return vmovn_u32(vcvt_s32_f32(result)); |
| 9129 | N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); |
| 9130 | N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); |
| 9131 | return N0; |
| 9132 | } |
| 9133 | |
| 9134 | static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) { |
| 9135 | SDNode *N = Op.getNode(); |
| 9136 | EVT VT = N->getValueType(0); |
| 9137 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
| 9138 | |
| 9139 | SDValue Carry = Op.getOperand(2); |
| 9140 | |
| 9141 | SDLoc DL(Op); |
| 9142 | |
| 9143 | SDValue Result; |
| 9144 | if (Op.getOpcode() == ISD::ADDCARRY) { |
| 9145 | // This converts the boolean value carry into the carry flag. |
| 9146 | Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); |
| 9147 | |
| 9148 | // Do the addition proper using the carry flag we wanted. |
| 9149 | Result = DAG.getNode(ARMISD::ADDE, DL, VTs, Op.getOperand(0), |
| 9150 | Op.getOperand(1), Carry); |
| 9151 | |
| 9152 | // Now convert the carry flag into a boolean value. |
| 9153 | Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG); |
| 9154 | } else { |
    // ARMISD::SUBE expects a carry, not a borrow like ISD::SUBCARRY, so we
    // have to invert the carry first.
| 9157 | Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, |
| 9158 | DAG.getConstant(1, DL, MVT::i32), Carry); |
| 9159 | // This converts the boolean value carry into the carry flag. |
| 9160 | Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); |
| 9161 | |
| 9162 | // Do the subtraction proper using the carry flag we wanted. |
| 9163 | Result = DAG.getNode(ARMISD::SUBE, DL, VTs, Op.getOperand(0), |
| 9164 | Op.getOperand(1), Carry); |
| 9165 | |
| 9166 | // Now convert the carry flag into a boolean value. |
| 9167 | Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG); |
| 9168 | // But the carry returned by ARMISD::SUBE is not a borrow as expected |
| 9169 | // by ISD::SUBCARRY, so compute 1 - C. |
| 9170 | Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, |
| 9171 | DAG.getConstant(1, DL, MVT::i32), Carry); |
| 9172 | } |
| 9173 | |
| 9174 | // Return both values. |
| 9175 | return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Carry); |
| 9176 | } |
| 9177 | |
| 9178 | SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { |
| 9179 | assert(Subtarget->isTargetDarwin()); |
| 9180 | |
  // For iOS, we want to call an alternative entry point: __sincos_stret;
  // return values are passed via sret.
| 9183 | SDLoc dl(Op); |
| 9184 | SDValue Arg = Op.getOperand(0); |
| 9185 | EVT ArgVT = Arg.getValueType(); |
| 9186 | Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); |
| 9187 | auto PtrVT = getPointerTy(DAG.getDataLayout()); |
| 9188 | |
| 9189 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); |
| 9190 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 9191 | |
| 9192 | // Pair of floats / doubles used to pass the result. |
| 9193 | Type *RetTy = StructType::get(ArgTy, ArgTy); |
| 9194 | auto &DL = DAG.getDataLayout(); |
| 9195 | |
| 9196 | ArgListTy Args; |
| 9197 | bool ShouldUseSRet = Subtarget->isAPCS_ABI(); |
| 9198 | SDValue SRet; |
| 9199 | if (ShouldUseSRet) { |
| 9200 | // Create stack object for sret. |
| 9201 | const uint64_t ByteSize = DL.getTypeAllocSize(RetTy); |
| 9202 | const Align StackAlign = DL.getPrefTypeAlign(RetTy); |
| 9203 | int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); |
| 9204 | SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL)); |
| 9205 | |
| 9206 | ArgListEntry Entry; |
| 9207 | Entry.Node = SRet; |
| 9208 | Entry.Ty = RetTy->getPointerTo(); |
| 9209 | Entry.IsSExt = false; |
| 9210 | Entry.IsZExt = false; |
| 9211 | Entry.IsSRet = true; |
| 9212 | Args.push_back(Entry); |
| 9213 | RetTy = Type::getVoidTy(*DAG.getContext()); |
| 9214 | } |
| 9215 | |
| 9216 | ArgListEntry Entry; |
| 9217 | Entry.Node = Arg; |
| 9218 | Entry.Ty = ArgTy; |
| 9219 | Entry.IsSExt = false; |
| 9220 | Entry.IsZExt = false; |
| 9221 | Args.push_back(Entry); |
| 9222 | |
| 9223 | RTLIB::Libcall LC = |
| 9224 | (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32; |
| 9225 | const char *LibcallName = getLibcallName(LC); |
| 9226 | CallingConv::ID CC = getLibcallCallingConv(LC); |
| 9227 | SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL)); |
| 9228 | |
| 9229 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 9230 | CLI.setDebugLoc(dl) |
| 9231 | .setChain(DAG.getEntryNode()) |
| 9232 | .setCallee(CC, RetTy, Callee, std::move(Args)) |
| 9233 | .setDiscardResult(ShouldUseSRet); |
| 9234 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
| 9235 | |
| 9236 | if (!ShouldUseSRet) |
| 9237 | return CallResult.first; |
| 9238 | |
| 9239 | SDValue LoadSin = |
| 9240 | DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo()); |
| 9241 | |
| 9242 | // Address of cos field. |
| 9243 | SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet, |
| 9244 | DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl)); |
| 9245 | SDValue LoadCos = |
| 9246 | DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo()); |
| 9247 | |
| 9248 | SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); |
| 9249 | return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, |
| 9250 | LoadSin.getValue(0), LoadCos.getValue(0)); |
| 9251 | } |
| 9252 | |
| 9253 | SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, |
| 9254 | bool Signed, |
| 9255 | SDValue &Chain) const { |
| 9256 | EVT VT = Op.getValueType(); |
| 9257 | assert((VT == MVT::i32 || VT == MVT::i64) && |
| 9258 | "unexpected type for custom lowering DIV" ); |
| 9259 | SDLoc dl(Op); |
| 9260 | |
| 9261 | const auto &DL = DAG.getDataLayout(); |
| 9262 | const auto &TLI = DAG.getTargetLoweringInfo(); |
| 9263 | |
| 9264 | const char *Name = nullptr; |
| 9265 | if (Signed) |
| 9266 | Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64" ; |
| 9267 | else |
| 9268 | Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64" ; |
| 9269 | |
| 9270 | SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL)); |
| 9271 | |
| 9272 | ARMTargetLowering::ArgListTy Args; |
| 9273 | |
| 9274 | for (auto AI : {1, 0}) { |
| 9275 | ArgListEntry Arg; |
| 9276 | Arg.Node = Op.getOperand(AI); |
| 9277 | Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext()); |
| 9278 | Args.push_back(Arg); |
| 9279 | } |
| 9280 | |
| 9281 | CallLoweringInfo CLI(DAG); |
| 9282 | CLI.setDebugLoc(dl) |
| 9283 | .setChain(Chain) |
| 9284 | .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()), |
| 9285 | ES, std::move(Args)); |
| 9286 | |
| 9287 | return LowerCallTo(CLI).first; |
| 9288 | } |
| 9289 | |
// This is a code size optimisation: return the original SDIV node to
// DAGCombiner when we don't want to expand SDIV into a sequence of
// instructions, and an empty node otherwise, which will cause the
// SDIV to be expanded in DAGCombine.
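// Illustrative example: with minsize and hwdiv, "sdiv i32 %x, 8" is kept as
// a single sdiv instruction rather than the longer shift/add expansion that
// DAGCombine would otherwise emit.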
| 9294 | SDValue |
| 9295 | ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, |
| 9296 | SelectionDAG &DAG, |
| 9297 | SmallVectorImpl<SDNode *> &Created) const { |
| 9298 | // TODO: Support SREM |
| 9299 | if (N->getOpcode() != ISD::SDIV) |
| 9300 | return SDValue(); |
| 9301 | |
| 9302 | const auto &ST = static_cast<const ARMSubtarget&>(DAG.getSubtarget()); |
| 9303 | const bool MinSize = ST.hasMinSize(); |
| 9304 | const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode() |
| 9305 | : ST.hasDivideInARMMode(); |
| 9306 | |
| 9307 | // Don't touch vector types; rewriting this may lead to scalarizing |
| 9308 | // the int divs. |
| 9309 | if (N->getOperand(0).getValueType().isVector()) |
| 9310 | return SDValue(); |
| 9311 | |
  // Bail if MinSize is not set; additionally, for both ARM and Thumb modes we
  // need hwdiv support for this to be really profitable.
| 9314 | if (!(MinSize && HasDivide)) |
| 9315 | return SDValue(); |
| 9316 | |
| 9317 | // ARM mode is a bit simpler than Thumb: we can handle large power |
| 9318 | // of 2 immediates with 1 mov instruction; no further checks required, |
| 9319 | // just return the sdiv node. |
| 9320 | if (!ST.isThumb()) |
| 9321 | return SDValue(N, 0); |
| 9322 | |
| 9323 | // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV, |
| 9324 | // and thus lose the code size benefits of a MOVS that requires only 2. |
| 9325 | // TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here, |
| 9326 | // but as it's doing exactly this, it's not worth the trouble to get TTI. |
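  // Illustrative example: powers of 2 up to 128 fit the 8-bit immediate of a
  // 16-bit MOVS, whereas 256 and above need a 32-bit MOVW, negating the size
  // win.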
| 9327 | if (Divisor.sgt(128)) |
| 9328 | return SDValue(); |
| 9329 | |
| 9330 | return SDValue(N, 0); |
| 9331 | } |
| 9332 | |
| 9333 | SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, |
| 9334 | bool Signed) const { |
| 9335 | assert(Op.getValueType() == MVT::i32 && |
| 9336 | "unexpected type for custom lowering DIV" ); |
| 9337 | SDLoc dl(Op); |
| 9338 | |
| 9339 | SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other, |
| 9340 | DAG.getEntryNode(), Op.getOperand(1)); |
| 9341 | |
| 9342 | return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); |
| 9343 | } |
| 9344 | |
static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N,
                                      SDValue InChain) {
| 9346 | SDLoc DL(N); |
| 9347 | SDValue Op = N->getOperand(1); |
| 9348 | if (N->getValueType(0) == MVT::i32) |
| 9349 | return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op); |
| 9350 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op, |
| 9351 | DAG.getConstant(0, DL, MVT::i32)); |
| 9352 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op, |
| 9353 | DAG.getConstant(1, DL, MVT::i32)); |
| 9354 | return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, |
| 9355 | DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi)); |
| 9356 | } |
| 9357 | |
| 9358 | void ARMTargetLowering::ExpandDIV_Windows( |
| 9359 | SDValue Op, SelectionDAG &DAG, bool Signed, |
| 9360 | SmallVectorImpl<SDValue> &Results) const { |
| 9361 | const auto &DL = DAG.getDataLayout(); |
| 9362 | const auto &TLI = DAG.getTargetLoweringInfo(); |
| 9363 | |
| 9364 | assert(Op.getValueType() == MVT::i64 && |
| 9365 | "unexpected type for custom lowering DIV" ); |
| 9366 | SDLoc dl(Op); |
| 9367 | |
| 9368 | SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode()); |
| 9369 | |
| 9370 | SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); |
| 9371 | |
| 9372 | SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result); |
| 9373 | SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result, |
| 9374 | DAG.getConstant(32, dl, TLI.getPointerTy(DL))); |
| 9375 | Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper); |
| 9376 | |
| 9377 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lower, Upper)); |
| 9378 | } |
| 9379 | |
| 9380 | static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) { |
| 9381 | LoadSDNode *LD = cast<LoadSDNode>(Op.getNode()); |
| 9382 | EVT MemVT = LD->getMemoryVT(); |
  assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
         "Expected a predicate type!");
  assert(MemVT == Op.getValueType());
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Expected a non-extending load");
  assert(LD->isUnindexed() && "Expected an unindexed load");
| 9389 | |
  // The basic MVE VLDR on a v4i1/v8i1 actually loads the entire 16-bit
  // predicate, with the "v4i1" bits spread out over the 16 bits loaded. We
  // need to make sure that 8/4 bits are actually loaded into the correct
  // place, which means loading the value and then shuffling the values into
  // the bottom bits of the predicate.
  // Equally, VLDR for a v16i1 will actually load 32 bits (so will be
  // incorrect for BE).
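  // Illustrative example: a v4i1 load reads its 4 bits as an i4 extloaded
  // into an i32, casts that to a full v16i1 predicate, and then re-narrows
  // to v4i1 with the EXTRACT_SUBVECTOR below.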
| 9397 | |
| 9398 | SDLoc dl(Op); |
| 9399 | SDValue Load = DAG.getExtLoad( |
| 9400 | ISD::EXTLOAD, dl, MVT::i32, LD->getChain(), LD->getBasePtr(), |
| 9401 | EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()), |
| 9402 | LD->getMemOperand()); |
| 9403 | SDValue Pred = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Load); |
| 9404 | if (MemVT != MVT::v16i1) |
| 9405 | Pred = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MemVT, Pred, |
| 9406 | DAG.getConstant(0, dl, MVT::i32)); |
| 9407 | return DAG.getMergeValues({Pred, Load.getValue(1)}, dl); |
| 9408 | } |
| 9409 | |
| 9410 | void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 9411 | SelectionDAG &DAG) const { |
| 9412 | LoadSDNode *LD = cast<LoadSDNode>(N); |
| 9413 | EVT MemVT = LD->getMemoryVT(); |
  assert(LD->isUnindexed() && "Loads should be unindexed at this point.");
| 9415 | |
| 9416 | if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && |
| 9417 | !Subtarget->isThumb1Only() && LD->isVolatile()) { |
| 9418 | SDLoc dl(N); |
| 9419 | SDValue Result = DAG.getMemIntrinsicNode( |
| 9420 | ARMISD::LDRD, dl, DAG.getVTList({MVT::i32, MVT::i32, MVT::Other}), |
| 9421 | {LD->getChain(), LD->getBasePtr()}, MemVT, LD->getMemOperand()); |
| 9422 | SDValue Lo = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 0 : 1); |
| 9423 | SDValue Hi = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 1 : 0); |
| 9424 | SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); |
| 9425 | Results.append({Pair, Result.getValue(2)}); |
| 9426 | } |
| 9427 | } |
| 9428 | |
| 9429 | static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) { |
| 9430 | StoreSDNode *ST = cast<StoreSDNode>(Op.getNode()); |
| 9431 | EVT MemVT = ST->getMemoryVT(); |
  assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
         "Expected a predicate type!");
  assert(MemVT == ST->getValue().getValueType());
  assert(!ST->isTruncatingStore() && "Expected a non-truncating store");
  assert(ST->isUnindexed() && "Expected an unindexed store");
| 9437 | |
| 9438 | // Only store the v4i1 or v8i1 worth of bits, via a buildvector with top bits |
| 9439 | // unset and a scalar store. |
| 9440 | SDLoc dl(Op); |
| 9441 | SDValue Build = ST->getValue(); |
| 9442 | if (MemVT != MVT::v16i1) { |
| 9443 | SmallVector<SDValue, 16> Ops; |
| 9444 | for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++) |
| 9445 | Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Build, |
| 9446 | DAG.getConstant(I, dl, MVT::i32))); |
| 9447 | for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++) |
| 9448 | Ops.push_back(DAG.getUNDEF(MVT::i32)); |
| 9449 | Build = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i1, Ops); |
| 9450 | } |
| 9451 | SDValue GRP = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Build); |
| 9452 | return DAG.getTruncStore( |
| 9453 | ST->getChain(), dl, GRP, ST->getBasePtr(), |
| 9454 | EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()), |
| 9455 | ST->getMemOperand()); |
| 9456 | } |
| 9457 | |
| 9458 | static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, |
| 9459 | const ARMSubtarget *Subtarget) { |
| 9460 | StoreSDNode *ST = cast<StoreSDNode>(Op.getNode()); |
| 9461 | EVT MemVT = ST->getMemoryVT(); |
  assert(ST->isUnindexed() && "Stores should be unindexed at this point.");
| 9463 | |
| 9464 | if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() && |
| 9465 | !Subtarget->isThumb1Only() && ST->isVolatile()) { |
| 9466 | SDNode *N = Op.getNode(); |
| 9467 | SDLoc dl(N); |
| 9468 | |
| 9469 | SDValue Lo = DAG.getNode( |
| 9470 | ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(), |
| 9471 | DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 0 : 1, dl, |
| 9472 | MVT::i32)); |
| 9473 | SDValue Hi = DAG.getNode( |
| 9474 | ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(), |
| 9475 | DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 1 : 0, dl, |
| 9476 | MVT::i32)); |
| 9477 | |
| 9478 | return DAG.getMemIntrinsicNode(ARMISD::STRD, dl, DAG.getVTList(MVT::Other), |
| 9479 | {ST->getChain(), Lo, Hi, ST->getBasePtr()}, |
| 9480 | MemVT, ST->getMemOperand()); |
| 9481 | } else if (Subtarget->hasMVEIntegerOps() && |
| 9482 | ((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || |
| 9483 | MemVT == MVT::v16i1))) { |
| 9484 | return LowerPredicateStore(Op, DAG); |
| 9485 | } |
| 9486 | |
| 9487 | return SDValue(); |
| 9488 | } |
| 9489 | |
| 9490 | static bool isZeroVector(SDValue N) { |
| 9491 | return (ISD::isBuildVectorAllZeros(N.getNode()) || |
| 9492 | (N->getOpcode() == ARMISD::VMOVIMM && |
| 9493 | isNullConstant(N->getOperand(0)))); |
| 9494 | } |
| 9495 | |
| 9496 | static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) { |
| 9497 | MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode()); |
| 9498 | MVT VT = Op.getSimpleValueType(); |
| 9499 | SDValue Mask = N->getMask(); |
| 9500 | SDValue PassThru = N->getPassThru(); |
| 9501 | SDLoc dl(Op); |
| 9502 | |
| 9503 | if (isZeroVector(PassThru)) |
| 9504 | return Op; |
| 9505 | |
| 9506 | // MVE Masked loads use zero as the passthru value. Here we convert undef to |
| 9507 | // zero too, and other values are lowered to a select. |
| 9508 | SDValue ZeroVec = DAG.getNode(ARMISD::VMOVIMM, dl, VT, |
| 9509 | DAG.getTargetConstant(0, dl, MVT::i32)); |
| 9510 | SDValue NewLoad = DAG.getMaskedLoad( |
| 9511 | VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask, ZeroVec, |
| 9512 | N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(), |
| 9513 | N->getExtensionType(), N->isExpandingLoad()); |
| 9514 | SDValue Combo = NewLoad; |
| 9515 | bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST || |
| 9516 | PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) && |
| 9517 | isZeroVector(PassThru->getOperand(0)); |
| 9518 | if (!PassThru.isUndef() && !PassThruIsCastZero) |
| 9519 | Combo = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru); |
| 9520 | return DAG.getMergeValues({Combo, NewLoad.getValue(1)}, dl); |
| 9521 | } |
| 9522 | |
| 9523 | static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG, |
| 9524 | const ARMSubtarget *ST) { |
| 9525 | if (!ST->hasMVEIntegerOps()) |
| 9526 | return SDValue(); |
| 9527 | |
| 9528 | SDLoc dl(Op); |
| 9529 | unsigned BaseOpcode = 0; |
| 9530 | switch (Op->getOpcode()) { |
| 9531 | default: llvm_unreachable("Expected VECREDUCE opcode" ); |
| 9532 | case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break; |
| 9533 | case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break; |
| 9534 | case ISD::VECREDUCE_MUL: BaseOpcode = ISD::MUL; break; |
| 9535 | case ISD::VECREDUCE_AND: BaseOpcode = ISD::AND; break; |
| 9536 | case ISD::VECREDUCE_OR: BaseOpcode = ISD::OR; break; |
| 9537 | case ISD::VECREDUCE_XOR: BaseOpcode = ISD::XOR; break; |
| 9538 | case ISD::VECREDUCE_FMAX: BaseOpcode = ISD::FMAXNUM; break; |
| 9539 | case ISD::VECREDUCE_FMIN: BaseOpcode = ISD::FMINNUM; break; |
| 9540 | } |
| 9541 | |
| 9542 | SDValue Op0 = Op->getOperand(0); |
| 9543 | EVT VT = Op0.getValueType(); |
| 9544 | EVT EltVT = VT.getVectorElementType(); |
| 9545 | unsigned NumElts = VT.getVectorNumElements(); |
| 9546 | unsigned NumActiveLanes = NumElts; |
| 9547 | |
| 9548 | assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 || |
| 9549 | NumActiveLanes == 2) && |
| 9550 | "Only expected a power 2 vector size" ); |
| 9551 | |
  // Use Op(X, Rev(X)), with Op the scalar reduction operation, until 4 items
  // remain. Going down to 4 vector elements allows us to easily extract
  // vector elements from the lanes.
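  // Illustrative example: reducing a v8i16 performs
  //   x = op(x, vrev32(x)); // 8 -> 4 live lanes
  // and then combines the scalars at lanes 0, 2, 4 and 6 below.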
| 9554 | while (NumActiveLanes > 4) { |
| 9555 | unsigned RevOpcode = NumActiveLanes == 16 ? ARMISD::VREV16 : ARMISD::VREV32; |
| 9556 | SDValue Rev = DAG.getNode(RevOpcode, dl, VT, Op0); |
| 9557 | Op0 = DAG.getNode(BaseOpcode, dl, VT, Op0, Rev); |
| 9558 | NumActiveLanes /= 2; |
| 9559 | } |
| 9560 | |
| 9561 | SDValue Res; |
| 9562 | if (NumActiveLanes == 4) { |
    // The remaining 4 elements are reduced sequentially.
| 9564 | SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, |
| 9565 | DAG.getConstant(0 * NumElts / 4, dl, MVT::i32)); |
| 9566 | SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, |
| 9567 | DAG.getConstant(1 * NumElts / 4, dl, MVT::i32)); |
| 9568 | SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, |
| 9569 | DAG.getConstant(2 * NumElts / 4, dl, MVT::i32)); |
| 9570 | SDValue Ext3 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, |
| 9571 | DAG.getConstant(3 * NumElts / 4, dl, MVT::i32)); |
| 9572 | SDValue Res0 = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags()); |
| 9573 | SDValue Res1 = DAG.getNode(BaseOpcode, dl, EltVT, Ext2, Ext3, Op->getFlags()); |
| 9574 | Res = DAG.getNode(BaseOpcode, dl, EltVT, Res0, Res1, Op->getFlags()); |
| 9575 | } else { |
| 9576 | SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, |
| 9577 | DAG.getConstant(0, dl, MVT::i32)); |
| 9578 | SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0, |
| 9579 | DAG.getConstant(1, dl, MVT::i32)); |
| 9580 | Res = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags()); |
| 9581 | } |
| 9582 | |
| 9583 | // Result type may be wider than element type. |
| 9584 | if (EltVT != Op->getValueType(0)) |
| 9585 | Res = DAG.getNode(ISD::ANY_EXTEND, dl, Op->getValueType(0), Res); |
| 9586 | return Res; |
| 9587 | } |
| 9588 | |
| 9589 | static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG, |
| 9590 | const ARMSubtarget *ST) { |
| 9591 | if (!ST->hasMVEFloatOps()) |
| 9592 | return SDValue(); |
| 9593 | return LowerVecReduce(Op, DAG, ST); |
| 9594 | } |
| 9595 | |
| 9596 | static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { |
| 9597 | if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering())) |
| 9598 | // Acquire/Release load/store is not legal for targets without a dmb or |
| 9599 | // equivalent available. |
| 9600 | return SDValue(); |
| 9601 | |
| 9602 | // Monotonic load/store is legal for all targets. |
| 9603 | return Op; |
| 9604 | } |
| 9605 | |
| 9606 | static void ReplaceREADCYCLECOUNTER(SDNode *N, |
| 9607 | SmallVectorImpl<SDValue> &Results, |
| 9608 | SelectionDAG &DAG, |
| 9609 | const ARMSubtarget *Subtarget) { |
| 9610 | SDLoc DL(N); |
| 9611 | // Under Power Management extensions, the cycle-count is: |
| 9612 | // mrc p15, #0, <Rt>, c9, c13, #0 |
| 9613 | SDValue Ops[] = { N->getOperand(0), // Chain |
| 9614 | DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32), |
| 9615 | DAG.getTargetConstant(15, DL, MVT::i32), |
| 9616 | DAG.getTargetConstant(0, DL, MVT::i32), |
| 9617 | DAG.getTargetConstant(9, DL, MVT::i32), |
| 9618 | DAG.getTargetConstant(13, DL, MVT::i32), |
| 9619 | DAG.getTargetConstant(0, DL, MVT::i32) |
| 9620 | }; |
| 9621 | |
| 9622 | SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, |
| 9623 | DAG.getVTList(MVT::i32, MVT::Other), Ops); |
| 9624 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32, |
| 9625 | DAG.getConstant(0, DL, MVT::i32))); |
| 9626 | Results.push_back(Cycles32.getValue(1)); |
| 9627 | } |
| 9628 | |
| 9629 | static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { |
| 9630 | SDLoc dl(V.getNode()); |
| 9631 | SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32); |
| 9632 | SDValue VHi = DAG.getAnyExtOrTrunc( |
| 9633 | DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)), |
| 9634 | dl, MVT::i32); |
| 9635 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 9636 | if (isBigEndian) |
| 9637 | std::swap (VLo, VHi); |
| 9638 | SDValue RegClass = |
| 9639 | DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32); |
| 9640 | SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32); |
| 9641 | SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32); |
| 9642 | const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 }; |
| 9643 | return SDValue( |
| 9644 | DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0); |
| 9645 | } |
| 9646 | |
| 9647 | static void ReplaceCMP_SWAP_64Results(SDNode *N, |
| 9648 | SmallVectorImpl<SDValue> & Results, |
| 9649 | SelectionDAG &DAG) { |
| 9650 | assert(N->getValueType(0) == MVT::i64 && |
| 9651 | "AtomicCmpSwap on types less than 64 should be legal" ); |
| 9652 | SDValue Ops[] = {N->getOperand(1), |
| 9653 | createGPRPairNode(DAG, N->getOperand(2)), |
| 9654 | createGPRPairNode(DAG, N->getOperand(3)), |
| 9655 | N->getOperand(0)}; |
| 9656 | SDNode *CmpSwap = DAG.getMachineNode( |
| 9657 | ARM::CMP_SWAP_64, SDLoc(N), |
| 9658 | DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops); |
| 9659 | |
| 9660 | MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand(); |
| 9661 | DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp}); |
| 9662 | |
| 9663 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 9664 | |
| 9665 | SDValue Lo = |
| 9666 | DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0, |
| 9667 | SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)); |
| 9668 | SDValue Hi = |
| 9669 | DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1, |
| 9670 | SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)); |
| 9671 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i64, Lo, Hi)); |
| 9672 | Results.push_back(SDValue(CmpSwap, 2)); |
| 9673 | } |
| 9674 | |
| 9675 | SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const { |
| 9676 | SDLoc dl(Op); |
| 9677 | EVT VT = Op.getValueType(); |
| 9678 | SDValue Chain = Op.getOperand(0); |
| 9679 | SDValue LHS = Op.getOperand(1); |
| 9680 | SDValue RHS = Op.getOperand(2); |
| 9681 | ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get(); |
| 9682 | bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS; |
| 9683 | |
| 9684 | // If we don't have instructions of this float type then soften to a libcall |
| 9685 | // and use SETCC instead. |
| 9686 | if (isUnsupportedFloatingType(LHS.getValueType())) { |
| 9687 | DAG.getTargetLoweringInfo().softenSetCCOperands( |
| 9688 | DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS, Chain, IsSignaling); |
| 9689 | if (!RHS.getNode()) { |
| 9690 | RHS = DAG.getConstant(0, dl, LHS.getValueType()); |
| 9691 | CC = ISD::SETNE; |
| 9692 | } |
| 9693 | SDValue Result = DAG.getNode(ISD::SETCC, dl, VT, LHS, RHS, |
| 9694 | DAG.getCondCode(CC)); |
| 9695 | return DAG.getMergeValues({Result, Chain}, dl); |
| 9696 | } |
| 9697 | |
| 9698 | ARMCC::CondCodes CondCode, CondCode2; |
| 9699 | FPCCToARMCC(CC, CondCode, CondCode2); |
| 9700 | |
| 9701 | // FIXME: Chain is not handled correctly here. Currently the FPSCR is implicit |
| 9702 | // in CMPFP and CMPFPE, but instead it should be made explicit by these |
| 9703 | // instructions using a chain instead of glue. This would also fix the problem |
| 9704 | // here (and also in LowerSELECT_CC) where we generate two comparisons when |
| 9705 | // CondCode2 != AL. |
| 9706 | SDValue True = DAG.getConstant(1, dl, VT); |
| 9707 | SDValue False = DAG.getConstant(0, dl, VT); |
| 9708 | SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); |
| 9709 | SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); |
| 9710 | SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling); |
| 9711 | SDValue Result = getCMOV(dl, VT, False, True, ARMcc, CCR, Cmp, DAG); |
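  // Some FP condition codes (e.g. SETONE) map to two ARM condition codes. In
  // that case, chain a second CMOV on top of the first so the overall result
  // is true when either condition holds.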
| 9712 | if (CondCode2 != ARMCC::AL) { |
| 9713 | ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32); |
| 9714 | Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling); |
| 9715 | Result = getCMOV(dl, VT, Result, True, ARMcc, CCR, Cmp, DAG); |
| 9716 | } |
| 9717 | return DAG.getMergeValues({Result, Chain}, dl); |
| 9718 | } |
| 9719 | |
| 9720 | SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
  LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
| 9722 | switch (Op.getOpcode()) { |
  default: llvm_unreachable("Don't know how to custom lower this!");
| 9724 | case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG); |
| 9725 | case ISD::ConstantPool: return LowerConstantPool(Op, DAG); |
| 9726 | case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); |
| 9727 | case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); |
| 9728 | case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); |
| 9729 | case ISD::SELECT: return LowerSELECT(Op, DAG); |
| 9730 | case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); |
| 9731 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); |
| 9732 | case ISD::BR_CC: return LowerBR_CC(Op, DAG); |
| 9733 | case ISD::BR_JT: return LowerBR_JT(Op, DAG); |
| 9734 | case ISD::VASTART: return LowerVASTART(Op, DAG); |
| 9735 | case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); |
| 9736 | case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); |
| 9737 | case ISD::SINT_TO_FP: |
| 9738 | case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); |
| 9739 | case ISD::STRICT_FP_TO_SINT: |
| 9740 | case ISD::STRICT_FP_TO_UINT: |
| 9741 | case ISD::FP_TO_SINT: |
| 9742 | case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); |
| 9743 | case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); |
| 9744 | case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); |
| 9745 | case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); |
| 9746 | case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); |
| 9747 | case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); |
| 9748 | case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG); |
| 9749 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget); |
| 9750 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, |
| 9751 | Subtarget); |
| 9752 | case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG, Subtarget); |
| 9753 | case ISD::SHL: |
| 9754 | case ISD::SRL: |
| 9755 | case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); |
| 9756 | case ISD::SREM: return LowerREM(Op.getNode(), DAG); |
| 9757 | case ISD::UREM: return LowerREM(Op.getNode(), DAG); |
| 9758 | case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); |
| 9759 | case ISD::SRL_PARTS: |
| 9760 | case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); |
| 9761 | case ISD::CTTZ: |
| 9762 | case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget); |
| 9763 | case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget); |
| 9764 | case ISD::SETCC: return LowerVSETCC(Op, DAG, Subtarget); |
| 9765 | case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG); |
| 9766 | case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget); |
| 9767 | case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); |
| 9768 | case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, Subtarget); |
| 9769 | case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, Subtarget); |
| 9770 | case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); |
| 9771 | case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, Subtarget); |
| 9772 | case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, Subtarget); |
| 9773 | case ISD::TRUNCATE: return LowerTruncatei1(Op, DAG, Subtarget); |
| 9774 | case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); |
| 9775 | case ISD::MUL: return LowerMUL(Op, DAG); |
| 9776 | case ISD::SDIV: |
| 9777 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) |
| 9778 | return LowerDIV_Windows(Op, DAG, /* Signed */ true); |
| 9779 | return LowerSDIV(Op, DAG, Subtarget); |
| 9780 | case ISD::UDIV: |
| 9781 | if (Subtarget->isTargetWindows() && !Op.getValueType().isVector()) |
| 9782 | return LowerDIV_Windows(Op, DAG, /* Signed */ false); |
| 9783 | return LowerUDIV(Op, DAG, Subtarget); |
| 9784 | case ISD::ADDCARRY: |
| 9785 | case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG); |
| 9786 | case ISD::SADDO: |
| 9787 | case ISD::SSUBO: |
| 9788 | return LowerSignedALUO(Op, DAG); |
| 9789 | case ISD::UADDO: |
| 9790 | case ISD::USUBO: |
| 9791 | return LowerUnsignedALUO(Op, DAG); |
| 9792 | case ISD::SADDSAT: |
| 9793 | case ISD::SSUBSAT: |
| 9794 | return LowerSADDSUBSAT(Op, DAG, Subtarget); |
| 9795 | case ISD::LOAD: |
| 9796 | return LowerPredicateLoad(Op, DAG); |
| 9797 | case ISD::STORE: |
| 9798 | return LowerSTORE(Op, DAG, Subtarget); |
| 9799 | case ISD::MLOAD: |
| 9800 | return LowerMLOAD(Op, DAG); |
| 9801 | case ISD::VECREDUCE_MUL: |
| 9802 | case ISD::VECREDUCE_AND: |
| 9803 | case ISD::VECREDUCE_OR: |
| 9804 | case ISD::VECREDUCE_XOR: |
| 9805 | return LowerVecReduce(Op, DAG, Subtarget); |
| 9806 | case ISD::VECREDUCE_FADD: |
| 9807 | case ISD::VECREDUCE_FMUL: |
| 9808 | case ISD::VECREDUCE_FMIN: |
| 9809 | case ISD::VECREDUCE_FMAX: |
| 9810 | return LowerVecReduceF(Op, DAG, Subtarget); |
| 9811 | case ISD::ATOMIC_LOAD: |
| 9812 | case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); |
| 9813 | case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); |
| 9814 | case ISD::SDIVREM: |
| 9815 | case ISD::UDIVREM: return LowerDivRem(Op, DAG); |
| 9816 | case ISD::DYNAMIC_STACKALLOC: |
| 9817 | if (Subtarget->isTargetWindows()) |
| 9818 | return LowerDYNAMIC_STACKALLOC(Op, DAG); |
    llvm_unreachable("Don't know how to custom lower this!");
| 9820 | case ISD::STRICT_FP_ROUND: |
| 9821 | case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); |
| 9822 | case ISD::STRICT_FP_EXTEND: |
| 9823 | case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); |
| 9824 | case ISD::STRICT_FSETCC: |
| 9825 | case ISD::STRICT_FSETCCS: return LowerFSETCC(Op, DAG); |
| 9826 | case ARMISD::WIN__DBZCHK: return SDValue(); |
| 9827 | } |
| 9828 | } |
| 9829 | |
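/// Lower the arm.smlald/smlaldx/smlsld/smlsldx intrinsics, whose 64-bit
/// accumulator type is illegal: split the i64 accumulator into i32 halves,
/// emit the matching ARMISD node, and recombine the two i32 results with
/// BUILD_PAIR.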
| 9830 | static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 9831 | SelectionDAG &DAG) { |
| 9832 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); |
| 9833 | unsigned Opc = 0; |
| 9834 | if (IntNo == Intrinsic::arm_smlald) |
| 9835 | Opc = ARMISD::SMLALD; |
| 9836 | else if (IntNo == Intrinsic::arm_smlaldx) |
| 9837 | Opc = ARMISD::SMLALDX; |
| 9838 | else if (IntNo == Intrinsic::arm_smlsld) |
| 9839 | Opc = ARMISD::SMLSLD; |
| 9840 | else if (IntNo == Intrinsic::arm_smlsldx) |
| 9841 | Opc = ARMISD::SMLSLDX; |
| 9842 | else |
| 9843 | return; |
| 9844 | |
| 9845 | SDLoc dl(N); |
| 9846 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, |
| 9847 | N->getOperand(3), |
| 9848 | DAG.getConstant(0, dl, MVT::i32)); |
| 9849 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, |
| 9850 | N->getOperand(3), |
| 9851 | DAG.getConstant(1, dl, MVT::i32)); |
| 9852 | |
| 9853 | SDValue LongMul = DAG.getNode(Opc, dl, |
| 9854 | DAG.getVTList(MVT::i32, MVT::i32), |
| 9855 | N->getOperand(1), N->getOperand(2), |
| 9856 | Lo, Hi); |
| 9857 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, |
| 9858 | LongMul.getValue(0), LongMul.getValue(1))); |
| 9859 | } |
| 9860 | |
/// ReplaceNodeResults - Replace the results of a node with an illegal result
/// type with new values built out of custom code.
| 9863 | void ARMTargetLowering::ReplaceNodeResults(SDNode *N, |
| 9864 | SmallVectorImpl<SDValue> &Results, |
| 9865 | SelectionDAG &DAG) const { |
| 9866 | SDValue Res; |
| 9867 | switch (N->getOpcode()) { |
| 9868 | default: |
    llvm_unreachable("Don't know how to custom expand this!");
| 9870 | case ISD::READ_REGISTER: |
| 9871 | ExpandREAD_REGISTER(N, Results, DAG); |
| 9872 | break; |
| 9873 | case ISD::BITCAST: |
| 9874 | Res = ExpandBITCAST(N, DAG, Subtarget); |
| 9875 | break; |
| 9876 | case ISD::SRL: |
| 9877 | case ISD::SRA: |
| 9878 | case ISD::SHL: |
| 9879 | Res = Expand64BitShift(N, DAG, Subtarget); |
| 9880 | break; |
| 9881 | case ISD::SREM: |
| 9882 | case ISD::UREM: |
| 9883 | Res = LowerREM(N, DAG); |
| 9884 | break; |
| 9885 | case ISD::SDIVREM: |
| 9886 | case ISD::UDIVREM: |
| 9887 | Res = LowerDivRem(SDValue(N, 0), DAG); |
    assert(Res.getNumOperands() == 2 && "DivRem needs two values");
| 9889 | Results.push_back(Res.getValue(0)); |
| 9890 | Results.push_back(Res.getValue(1)); |
| 9891 | return; |
| 9892 | case ISD::SADDSAT: |
| 9893 | case ISD::SSUBSAT: |
| 9894 | Res = LowerSADDSUBSAT(SDValue(N, 0), DAG, Subtarget); |
| 9895 | break; |
| 9896 | case ISD::READCYCLECOUNTER: |
| 9897 | ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); |
| 9898 | return; |
| 9899 | case ISD::UDIV: |
| 9900 | case ISD::SDIV: |
    assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
| 9902 | return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV, |
| 9903 | Results); |
| 9904 | case ISD::ATOMIC_CMP_SWAP: |
| 9905 | ReplaceCMP_SWAP_64Results(N, Results, DAG); |
| 9906 | return; |
| 9907 | case ISD::INTRINSIC_WO_CHAIN: |
| 9908 | return ReplaceLongIntrinsic(N, Results, DAG); |
| 9909 | case ISD::ABS: |
| 9910 | lowerABS(N, Results, DAG); |
    return;
| 9912 | case ISD::LOAD: |
| 9913 | LowerLOAD(N, Results, DAG); |
| 9914 | break; |
| 9915 | } |
| 9916 | if (Res.getNode()) |
| 9917 | Results.push_back(Res); |
| 9918 | } |
| 9919 | |
| 9920 | //===----------------------------------------------------------------------===// |
| 9921 | // ARM Scheduler Hooks |
| 9922 | //===----------------------------------------------------------------------===// |
| 9923 | |
| 9924 | /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and |
| 9925 | /// registers the function context. |
| 9926 | void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, |
| 9927 | MachineBasicBlock *MBB, |
| 9928 | MachineBasicBlock *DispatchBB, |
| 9929 | int FI) const { |
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported with SjLj");
| 9932 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 9933 | DebugLoc dl = MI.getDebugLoc(); |
| 9934 | MachineFunction *MF = MBB->getParent(); |
| 9935 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
| 9936 | MachineConstantPool *MCP = MF->getConstantPool(); |
| 9937 | ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); |
| 9938 | const Function &F = MF->getFunction(); |
| 9939 | |
| 9940 | bool isThumb = Subtarget->isThumb(); |
| 9941 | bool isThumb2 = Subtarget->isThumb2(); |
| 9942 | |
| 9943 | unsigned PCLabelId = AFI->createPICLabelUId(); |
| 9944 | unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; |
| 9945 | ARMConstantPoolValue *CPV = |
| 9946 | ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj); |
| 9947 | unsigned CPI = MCP->getConstantPoolIndex(CPV, Align(4)); |
| 9948 | |
| 9949 | const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass |
| 9950 | : &ARM::GPRRegClass; |
| 9951 | |
| 9952 | // Grab constant pool and fixed stack memory operands. |
| 9953 | MachineMemOperand *CPMMO = |
| 9954 | MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), |
| 9955 | MachineMemOperand::MOLoad, 4, Align(4)); |
| 9956 | |
| 9957 | MachineMemOperand *FIMMOSt = |
| 9958 | MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), |
| 9959 | MachineMemOperand::MOStore, 4, Align(4)); |
| 9960 | |
| 9961 | // Load the address of the dispatch MBB into the jump buffer. |
| 9962 | if (isThumb2) { |
| 9963 | // Incoming value: jbuf |
| 9964 | // ldr.n r5, LCPI1_1 |
| 9965 | // orr r5, r5, #1 |
| 9966 | // add r5, pc |
| 9967 | // str r5, [$jbuf, #+4] ; &jbuf[1] |
| 9968 | Register NewVReg1 = MRI->createVirtualRegister(TRC); |
| 9969 | BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) |
| 9970 | .addConstantPoolIndex(CPI) |
| 9971 | .addMemOperand(CPMMO) |
| 9972 | .add(predOps(ARMCC::AL)); |
| 9973 | // Set the low bit because of thumb mode. |
| 9974 | Register NewVReg2 = MRI->createVirtualRegister(TRC); |
| 9975 | BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) |
| 9976 | .addReg(NewVReg1, RegState::Kill) |
| 9977 | .addImm(0x01) |
| 9978 | .add(predOps(ARMCC::AL)) |
| 9979 | .add(condCodeOp()); |
| 9980 | Register NewVReg3 = MRI->createVirtualRegister(TRC); |
| 9981 | BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) |
| 9982 | .addReg(NewVReg2, RegState::Kill) |
| 9983 | .addImm(PCLabelId); |
| 9984 | BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) |
| 9985 | .addReg(NewVReg3, RegState::Kill) |
| 9986 | .addFrameIndex(FI) |
| 9987 | .addImm(36) // &jbuf[1] :: pc |
| 9988 | .addMemOperand(FIMMOSt) |
| 9989 | .add(predOps(ARMCC::AL)); |
| 9990 | } else if (isThumb) { |
| 9991 | // Incoming value: jbuf |
| 9992 | // ldr.n r1, LCPI1_4 |
| 9993 | // add r1, pc |
| 9994 | // mov r2, #1 |
| 9995 | // orrs r1, r2 |
| 9996 | // add r2, $jbuf, #+4 ; &jbuf[1] |
| 9997 | // str r1, [r2] |
| 9998 | Register NewVReg1 = MRI->createVirtualRegister(TRC); |
| 9999 | BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) |
| 10000 | .addConstantPoolIndex(CPI) |
| 10001 | .addMemOperand(CPMMO) |
| 10002 | .add(predOps(ARMCC::AL)); |
| 10003 | Register NewVReg2 = MRI->createVirtualRegister(TRC); |
| 10004 | BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) |
| 10005 | .addReg(NewVReg1, RegState::Kill) |
| 10006 | .addImm(PCLabelId); |
| 10007 | // Set the low bit because of thumb mode. |
| 10008 | Register NewVReg3 = MRI->createVirtualRegister(TRC); |
| 10009 | BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) |
| 10010 | .addReg(ARM::CPSR, RegState::Define) |
| 10011 | .addImm(1) |
| 10012 | .add(predOps(ARMCC::AL)); |
| 10013 | Register NewVReg4 = MRI->createVirtualRegister(TRC); |
| 10014 | BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) |
| 10015 | .addReg(ARM::CPSR, RegState::Define) |
| 10016 | .addReg(NewVReg2, RegState::Kill) |
| 10017 | .addReg(NewVReg3, RegState::Kill) |
| 10018 | .add(predOps(ARMCC::AL)); |
| 10019 | Register NewVReg5 = MRI->createVirtualRegister(TRC); |
| 10020 | BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5) |
| 10021 | .addFrameIndex(FI) |
| 10022 | .addImm(36); // &jbuf[1] :: pc |
| 10023 | BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) |
| 10024 | .addReg(NewVReg4, RegState::Kill) |
| 10025 | .addReg(NewVReg5, RegState::Kill) |
| 10026 | .addImm(0) |
| 10027 | .addMemOperand(FIMMOSt) |
| 10028 | .add(predOps(ARMCC::AL)); |
| 10029 | } else { |
| 10030 | // Incoming value: jbuf |
| 10031 | // ldr r1, LCPI1_1 |
| 10032 | // add r1, pc, r1 |
| 10033 | // str r1, [$jbuf, #+4] ; &jbuf[1] |
| 10034 | Register NewVReg1 = MRI->createVirtualRegister(TRC); |
| 10035 | BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) |
| 10036 | .addConstantPoolIndex(CPI) |
| 10037 | .addImm(0) |
| 10038 | .addMemOperand(CPMMO) |
| 10039 | .add(predOps(ARMCC::AL)); |
| 10040 | Register NewVReg2 = MRI->createVirtualRegister(TRC); |
| 10041 | BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) |
| 10042 | .addReg(NewVReg1, RegState::Kill) |
| 10043 | .addImm(PCLabelId) |
| 10044 | .add(predOps(ARMCC::AL)); |
| 10045 | BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) |
| 10046 | .addReg(NewVReg2, RegState::Kill) |
| 10047 | .addFrameIndex(FI) |
| 10048 | .addImm(36) // &jbuf[1] :: pc |
| 10049 | .addMemOperand(FIMMOSt) |
| 10050 | .add(predOps(ARMCC::AL)); |
| 10051 | } |
| 10052 | } |
| 10053 | |
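/// Emit the SjLj dispatch block: load the call site index from the function
/// context, range-check it against the number of landing pads (branching to a
/// trap block if it is out of range), and jump through an inline jump table
/// to the selected landing pad.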
| 10054 | void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, |
| 10055 | MachineBasicBlock *MBB) const { |
| 10056 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 10057 | DebugLoc dl = MI.getDebugLoc(); |
| 10058 | MachineFunction *MF = MBB->getParent(); |
| 10059 | MachineRegisterInfo *MRI = &MF->getRegInfo(); |
| 10060 | MachineFrameInfo &MFI = MF->getFrameInfo(); |
| 10061 | int FI = MFI.getFunctionContextIndex(); |
| 10062 | |
| 10063 | const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass |
| 10064 | : &ARM::GPRnopcRegClass; |
| 10065 | |
| 10066 | // Get a mapping of the call site numbers to all of the landing pads they're |
| 10067 | // associated with. |
| 10068 | DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad; |
| 10069 | unsigned MaxCSNum = 0; |
| 10070 | for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; |
| 10071 | ++BB) { |
| 10072 | if (!BB->isEHPad()) continue; |
| 10073 | |
| 10074 | // FIXME: We should assert that the EH_LABEL is the first MI in the landing |
| 10075 | // pad. |
| 10076 | for (MachineBasicBlock::iterator |
| 10077 | II = BB->begin(), IE = BB->end(); II != IE; ++II) { |
| 10078 | if (!II->isEHLabel()) continue; |
| 10079 | |
| 10080 | MCSymbol *Sym = II->getOperand(0).getMCSymbol(); |
| 10081 | if (!MF->hasCallSiteLandingPad(Sym)) continue; |
| 10082 | |
| 10083 | SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym); |
| 10084 | for (SmallVectorImpl<unsigned>::iterator |
| 10085 | CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); |
| 10086 | CSI != CSE; ++CSI) { |
| 10087 | CallSiteNumToLPad[*CSI].push_back(&*BB); |
| 10088 | MaxCSNum = std::max(MaxCSNum, *CSI); |
| 10089 | } |
| 10090 | break; |
| 10091 | } |
| 10092 | } |
| 10093 | |
| 10094 | // Get an ordered list of the machine basic blocks for the jump table. |
| 10095 | std::vector<MachineBasicBlock*> LPadList; |
| 10096 | SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs; |
| 10097 | LPadList.reserve(CallSiteNumToLPad.size()); |
| 10098 | for (unsigned I = 1; I <= MaxCSNum; ++I) { |
| 10099 | SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; |
| 10100 | for (SmallVectorImpl<MachineBasicBlock*>::iterator |
| 10101 | II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) { |
| 10102 | LPadList.push_back(*II); |
| 10103 | InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end()); |
| 10104 | } |
| 10105 | } |
| 10106 | |
  assert(!LPadList.empty() &&
         "No landing pad destinations for the dispatch jump table!");
| 10109 | |
| 10110 | // Create the jump table and associated information. |
| 10111 | MachineJumpTableInfo *JTI = |
| 10112 | MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); |
| 10113 | unsigned MJTI = JTI->createJumpTableIndex(LPadList); |
| 10114 | |
| 10115 | // Create the MBBs for the dispatch code. |
| 10116 | |
| 10117 | // Shove the dispatch's address into the return slot in the function context. |
| 10118 | MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); |
| 10119 | DispatchBB->setIsEHPad(); |
| 10120 | |
| 10121 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
| 10122 | unsigned trap_opcode; |
| 10123 | if (Subtarget->isThumb()) |
| 10124 | trap_opcode = ARM::tTRAP; |
| 10125 | else |
| 10126 | trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP; |
| 10127 | |
| 10128 | BuildMI(TrapBB, dl, TII->get(trap_opcode)); |
| 10129 | DispatchBB->addSuccessor(TrapBB); |
| 10130 | |
| 10131 | MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); |
| 10132 | DispatchBB->addSuccessor(DispContBB); |
| 10133 | |
  // Insert the MBBs.
| 10135 | MF->insert(MF->end(), DispatchBB); |
| 10136 | MF->insert(MF->end(), DispContBB); |
| 10137 | MF->insert(MF->end(), TrapBB); |
| 10138 | |
| 10139 | // Insert code into the entry block that creates and registers the function |
| 10140 | // context. |
| 10141 | SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); |
| 10142 | |
| 10143 | MachineMemOperand *FIMMOLd = MF->getMachineMemOperand( |
| 10144 | MachinePointerInfo::getFixedStack(*MF, FI), |
| 10145 | MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, Align(4)); |
| 10146 | |
| 10147 | MachineInstrBuilder MIB; |
| 10148 | MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); |
| 10149 | |
| 10150 | const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); |
| 10151 | const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); |
| 10152 | |
| 10153 | // Add a register mask with no preserved registers. This results in all |
| 10154 | // registers being marked as clobbered. This can't work if the dispatch block |
| 10155 | // is in a Thumb1 function and is linked with ARM code which uses the FP |
| 10156 | // registers, as there is no way to preserve the FP registers in Thumb1 mode. |
| 10157 | MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF)); |
| 10158 | |
| 10159 | bool IsPositionIndependent = isPositionIndependent(); |
| 10160 | unsigned NumLPads = LPadList.size(); |
| 10161 | if (Subtarget->isThumb2()) { |
| 10162 | Register NewVReg1 = MRI->createVirtualRegister(TRC); |
| 10163 | BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) |
| 10164 | .addFrameIndex(FI) |
| 10165 | .addImm(4) |
| 10166 | .addMemOperand(FIMMOLd) |
| 10167 | .add(predOps(ARMCC::AL)); |
| 10168 | |
| 10169 | if (NumLPads < 256) { |
| 10170 | BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) |
| 10171 | .addReg(NewVReg1) |
| 10172 | .addImm(LPadList.size()) |
| 10173 | .add(predOps(ARMCC::AL)); |
| 10174 | } else { |
| 10175 | Register VReg1 = MRI->createVirtualRegister(TRC); |
| 10176 | BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) |
| 10177 | .addImm(NumLPads & 0xFFFF) |
| 10178 | .add(predOps(ARMCC::AL)); |
| 10179 | |
| 10180 | unsigned VReg2 = VReg1; |
| 10181 | if ((NumLPads & 0xFFFF0000) != 0) { |
| 10182 | VReg2 = MRI->createVirtualRegister(TRC); |
| 10183 | BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) |
| 10184 | .addReg(VReg1) |
| 10185 | .addImm(NumLPads >> 16) |
| 10186 | .add(predOps(ARMCC::AL)); |
| 10187 | } |
| 10188 | |
| 10189 | BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) |
| 10190 | .addReg(NewVReg1) |
| 10191 | .addReg(VReg2) |
| 10192 | .add(predOps(ARMCC::AL)); |
| 10193 | } |
| 10194 | |
| 10195 | BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) |
| 10196 | .addMBB(TrapBB) |
| 10197 | .addImm(ARMCC::HI) |
| 10198 | .addReg(ARM::CPSR); |
| 10199 | |
| 10200 | Register NewVReg3 = MRI->createVirtualRegister(TRC); |
| 10201 | BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3) |
| 10202 | .addJumpTableIndex(MJTI) |
| 10203 | .add(predOps(ARMCC::AL)); |
| 10204 | |
| 10205 | Register NewVReg4 = MRI->createVirtualRegister(TRC); |
| 10206 | BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) |
| 10207 | .addReg(NewVReg3, RegState::Kill) |
| 10208 | .addReg(NewVReg1) |
| 10209 | .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)) |
| 10210 | .add(predOps(ARMCC::AL)) |
| 10211 | .add(condCodeOp()); |
| 10212 | |
| 10213 | BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) |
| 10214 | .addReg(NewVReg4, RegState::Kill) |
| 10215 | .addReg(NewVReg1) |
| 10216 | .addJumpTableIndex(MJTI); |
| 10217 | } else if (Subtarget->isThumb()) { |
| 10218 | Register NewVReg1 = MRI->createVirtualRegister(TRC); |
| 10219 | BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) |
| 10220 | .addFrameIndex(FI) |
| 10221 | .addImm(1) |
| 10222 | .addMemOperand(FIMMOLd) |
| 10223 | .add(predOps(ARMCC::AL)); |
| 10224 | |
| 10225 | if (NumLPads < 256) { |
| 10226 | BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) |
| 10227 | .addReg(NewVReg1) |
| 10228 | .addImm(NumLPads) |
| 10229 | .add(predOps(ARMCC::AL)); |
| 10230 | } else { |
| 10231 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 10232 | Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); |
| 10233 | const Constant *C = ConstantInt::get(Int32Ty, NumLPads); |
| 10234 | |
| 10235 | // MachineConstantPool wants an explicit alignment. |
| 10236 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty); |
| 10237 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
| 10238 | |
| 10239 | Register VReg1 = MRI->createVirtualRegister(TRC); |
| 10240 | BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) |
| 10241 | .addReg(VReg1, RegState::Define) |
| 10242 | .addConstantPoolIndex(Idx) |
| 10243 | .add(predOps(ARMCC::AL)); |
| 10244 | BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) |
| 10245 | .addReg(NewVReg1) |
| 10246 | .addReg(VReg1) |
| 10247 | .add(predOps(ARMCC::AL)); |
| 10248 | } |
| 10249 | |
| 10250 | BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) |
| 10251 | .addMBB(TrapBB) |
| 10252 | .addImm(ARMCC::HI) |
| 10253 | .addReg(ARM::CPSR); |
| 10254 | |
| 10255 | Register NewVReg2 = MRI->createVirtualRegister(TRC); |
| 10256 | BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) |
| 10257 | .addReg(ARM::CPSR, RegState::Define) |
| 10258 | .addReg(NewVReg1) |
| 10259 | .addImm(2) |
| 10260 | .add(predOps(ARMCC::AL)); |
| 10261 | |
| 10262 | Register NewVReg3 = MRI->createVirtualRegister(TRC); |
| 10263 | BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) |
| 10264 | .addJumpTableIndex(MJTI) |
| 10265 | .add(predOps(ARMCC::AL)); |
| 10266 | |
| 10267 | Register NewVReg4 = MRI->createVirtualRegister(TRC); |
| 10268 | BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) |
| 10269 | .addReg(ARM::CPSR, RegState::Define) |
| 10270 | .addReg(NewVReg2, RegState::Kill) |
| 10271 | .addReg(NewVReg3) |
| 10272 | .add(predOps(ARMCC::AL)); |
| 10273 | |
| 10274 | MachineMemOperand *JTMMOLd = |
| 10275 | MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF), |
| 10276 | MachineMemOperand::MOLoad, 4, Align(4)); |
| 10277 | |
| 10278 | Register NewVReg5 = MRI->createVirtualRegister(TRC); |
| 10279 | BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) |
| 10280 | .addReg(NewVReg4, RegState::Kill) |
| 10281 | .addImm(0) |
| 10282 | .addMemOperand(JTMMOLd) |
| 10283 | .add(predOps(ARMCC::AL)); |
| 10284 | |
| 10285 | unsigned NewVReg6 = NewVReg5; |
| 10286 | if (IsPositionIndependent) { |
| 10287 | NewVReg6 = MRI->createVirtualRegister(TRC); |
| 10288 | BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) |
| 10289 | .addReg(ARM::CPSR, RegState::Define) |
| 10290 | .addReg(NewVReg5, RegState::Kill) |
| 10291 | .addReg(NewVReg3) |
| 10292 | .add(predOps(ARMCC::AL)); |
| 10293 | } |
| 10294 | |
| 10295 | BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) |
| 10296 | .addReg(NewVReg6, RegState::Kill) |
| 10297 | .addJumpTableIndex(MJTI); |
| 10298 | } else { |
| 10299 | Register NewVReg1 = MRI->createVirtualRegister(TRC); |
| 10300 | BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) |
| 10301 | .addFrameIndex(FI) |
| 10302 | .addImm(4) |
| 10303 | .addMemOperand(FIMMOLd) |
| 10304 | .add(predOps(ARMCC::AL)); |
| 10305 | |
| 10306 | if (NumLPads < 256) { |
| 10307 | BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) |
| 10308 | .addReg(NewVReg1) |
| 10309 | .addImm(NumLPads) |
| 10310 | .add(predOps(ARMCC::AL)); |
| 10311 | } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { |
| 10312 | Register VReg1 = MRI->createVirtualRegister(TRC); |
| 10313 | BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) |
| 10314 | .addImm(NumLPads & 0xFFFF) |
| 10315 | .add(predOps(ARMCC::AL)); |
| 10316 | |
| 10317 | unsigned VReg2 = VReg1; |
| 10318 | if ((NumLPads & 0xFFFF0000) != 0) { |
| 10319 | VReg2 = MRI->createVirtualRegister(TRC); |
| 10320 | BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) |
| 10321 | .addReg(VReg1) |
| 10322 | .addImm(NumLPads >> 16) |
| 10323 | .add(predOps(ARMCC::AL)); |
| 10324 | } |
| 10325 | |
| 10326 | BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) |
| 10327 | .addReg(NewVReg1) |
| 10328 | .addReg(VReg2) |
| 10329 | .add(predOps(ARMCC::AL)); |
| 10330 | } else { |
| 10331 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 10332 | Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); |
| 10333 | const Constant *C = ConstantInt::get(Int32Ty, NumLPads); |
| 10334 | |
| 10335 | // MachineConstantPool wants an explicit alignment. |
| 10336 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty); |
| 10337 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
| 10338 | |
| 10339 | Register VReg1 = MRI->createVirtualRegister(TRC); |
| 10340 | BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) |
| 10341 | .addReg(VReg1, RegState::Define) |
| 10342 | .addConstantPoolIndex(Idx) |
| 10343 | .addImm(0) |
| 10344 | .add(predOps(ARMCC::AL)); |
| 10345 | BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) |
| 10346 | .addReg(NewVReg1) |
| 10347 | .addReg(VReg1, RegState::Kill) |
| 10348 | .add(predOps(ARMCC::AL)); |
| 10349 | } |
| 10350 | |
| 10351 | BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) |
| 10352 | .addMBB(TrapBB) |
| 10353 | .addImm(ARMCC::HI) |
| 10354 | .addReg(ARM::CPSR); |
| 10355 | |
| 10356 | Register NewVReg3 = MRI->createVirtualRegister(TRC); |
| 10357 | BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) |
| 10358 | .addReg(NewVReg1) |
| 10359 | .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)) |
| 10360 | .add(predOps(ARMCC::AL)) |
| 10361 | .add(condCodeOp()); |
| 10362 | Register NewVReg4 = MRI->createVirtualRegister(TRC); |
| 10363 | BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) |
| 10364 | .addJumpTableIndex(MJTI) |
| 10365 | .add(predOps(ARMCC::AL)); |
| 10366 | |
| 10367 | MachineMemOperand *JTMMOLd = |
| 10368 | MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF), |
| 10369 | MachineMemOperand::MOLoad, 4, Align(4)); |
| 10370 | Register NewVReg5 = MRI->createVirtualRegister(TRC); |
| 10371 | BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) |
| 10372 | .addReg(NewVReg3, RegState::Kill) |
| 10373 | .addReg(NewVReg4) |
| 10374 | .addImm(0) |
| 10375 | .addMemOperand(JTMMOLd) |
| 10376 | .add(predOps(ARMCC::AL)); |
| 10377 | |
| 10378 | if (IsPositionIndependent) { |
| 10379 | BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) |
| 10380 | .addReg(NewVReg5, RegState::Kill) |
| 10381 | .addReg(NewVReg4) |
| 10382 | .addJumpTableIndex(MJTI); |
| 10383 | } else { |
| 10384 | BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr)) |
| 10385 | .addReg(NewVReg5, RegState::Kill) |
| 10386 | .addJumpTableIndex(MJTI); |
| 10387 | } |
| 10388 | } |
| 10389 | |
| 10390 | // Add the jump table entries as successors to the MBB. |
| 10391 | SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; |
| 10392 | for (std::vector<MachineBasicBlock*>::iterator |
| 10393 | I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { |
| 10394 | MachineBasicBlock *CurMBB = *I; |
| 10395 | if (SeenMBBs.insert(CurMBB).second) |
| 10396 | DispContBB->addSuccessor(CurMBB); |
| 10397 | } |
| 10398 | |
| 10399 | // N.B. the order the invoke BBs are processed in doesn't matter here. |
| 10400 | const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF); |
| 10401 | SmallVector<MachineBasicBlock*, 64> MBBLPads; |
| 10402 | for (MachineBasicBlock *BB : InvokeBBs) { |
| 10403 | |
| 10404 | // Remove the landing pad successor from the invoke block and replace it |
| 10405 | // with the new dispatch block. |
| 10406 | SmallVector<MachineBasicBlock*, 4> Successors(BB->successors()); |
| 10407 | while (!Successors.empty()) { |
| 10408 | MachineBasicBlock *SMBB = Successors.pop_back_val(); |
| 10409 | if (SMBB->isEHPad()) { |
| 10410 | BB->removeSuccessor(SMBB); |
| 10411 | MBBLPads.push_back(SMBB); |
| 10412 | } |
| 10413 | } |
| 10414 | |
| 10415 | BB->addSuccessor(DispatchBB, BranchProbability::getZero()); |
| 10416 | BB->normalizeSuccProbs(); |
| 10417 | |
| 10418 | // Find the invoke call and mark all of the callee-saved registers as |
| 10419 | // 'implicit defined' so that they're spilled. This prevents code from |
| 10420 | // moving instructions to before the EH block, where they will never be |
| 10421 | // executed. |
| 10422 | for (MachineBasicBlock::reverse_iterator |
| 10423 | II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { |
| 10424 | if (!II->isCall()) continue; |
| 10425 | |
| 10426 | DenseMap<unsigned, bool> DefRegs; |
| 10427 | for (MachineInstr::mop_iterator |
| 10428 | OI = II->operands_begin(), OE = II->operands_end(); |
| 10429 | OI != OE; ++OI) { |
| 10430 | if (!OI->isReg()) continue; |
| 10431 | DefRegs[OI->getReg()] = true; |
| 10432 | } |
| 10433 | |
| 10434 | MachineInstrBuilder MIB(*MF, &*II); |
| 10435 | |
| 10436 | for (unsigned i = 0; SavedRegs[i] != 0; ++i) { |
| 10437 | unsigned Reg = SavedRegs[i]; |
| 10438 | if (Subtarget->isThumb2() && |
| 10439 | !ARM::tGPRRegClass.contains(Reg) && |
| 10440 | !ARM::hGPRRegClass.contains(Reg)) |
| 10441 | continue; |
| 10442 | if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) |
| 10443 | continue; |
| 10444 | if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) |
| 10445 | continue; |
| 10446 | if (!DefRegs[Reg]) |
| 10447 | MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); |
| 10448 | } |
| 10449 | |
| 10450 | break; |
| 10451 | } |
| 10452 | } |
| 10453 | |
| 10454 | // Mark all former landing pads as non-landing pads. The dispatch is the only |
| 10455 | // landing pad now. |
| 10456 | for (SmallVectorImpl<MachineBasicBlock*>::iterator |
| 10457 | I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I) |
| 10458 | (*I)->setIsEHPad(false); |
| 10459 | |
| 10460 | // The instruction is gone now. |
| 10461 | MI.eraseFromParent(); |
| 10462 | } |
| 10463 | |
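/// Return the successor of MBB that is not Succ; MBB is expected to have
/// exactly two successors.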
| 10464 | static |
| 10465 | MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { |
| 10466 | for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), |
| 10467 | E = MBB->succ_end(); I != E; ++I) |
| 10468 | if (*I != Succ) |
| 10469 | return *I; |
| 10470 | llvm_unreachable("Expecting a BB with two successors!" ); |
| 10471 | } |
| 10472 | |
/// Return the load opcode for a given load size. If load size >= 8, a NEON
/// opcode will be returned.
| 10475 | static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) { |
| 10476 | if (LdSize >= 8) |
| 10477 | return LdSize == 16 ? ARM::VLD1q32wb_fixed |
| 10478 | : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0; |
| 10479 | if (IsThumb1) |
| 10480 | return LdSize == 4 ? ARM::tLDRi |
| 10481 | : LdSize == 2 ? ARM::tLDRHi |
| 10482 | : LdSize == 1 ? ARM::tLDRBi : 0; |
| 10483 | if (IsThumb2) |
| 10484 | return LdSize == 4 ? ARM::t2LDR_POST |
| 10485 | : LdSize == 2 ? ARM::t2LDRH_POST |
| 10486 | : LdSize == 1 ? ARM::t2LDRB_POST : 0; |
| 10487 | return LdSize == 4 ? ARM::LDR_POST_IMM |
| 10488 | : LdSize == 2 ? ARM::LDRH_POST |
| 10489 | : LdSize == 1 ? ARM::LDRB_POST_IMM : 0; |
| 10490 | } |
| 10491 | |
/// Return the store opcode for a given store size. If store size >= 8, a NEON
/// opcode will be returned.
| 10494 | static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) { |
| 10495 | if (StSize >= 8) |
| 10496 | return StSize == 16 ? ARM::VST1q32wb_fixed |
| 10497 | : StSize == 8 ? ARM::VST1d32wb_fixed : 0; |
| 10498 | if (IsThumb1) |
| 10499 | return StSize == 4 ? ARM::tSTRi |
| 10500 | : StSize == 2 ? ARM::tSTRHi |
| 10501 | : StSize == 1 ? ARM::tSTRBi : 0; |
| 10502 | if (IsThumb2) |
| 10503 | return StSize == 4 ? ARM::t2STR_POST |
| 10504 | : StSize == 2 ? ARM::t2STRH_POST |
| 10505 | : StSize == 1 ? ARM::t2STRB_POST : 0; |
| 10506 | return StSize == 4 ? ARM::STR_POST_IMM |
| 10507 | : StSize == 2 ? ARM::STRH_POST |
| 10508 | : StSize == 1 ? ARM::STRB_POST_IMM : 0; |
| 10509 | } |
| 10510 | |
| 10511 | /// Emit a post-increment load operation with given size. The instructions |
| 10512 | /// will be added to BB at Pos. |
| 10513 | static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
| 10514 | const TargetInstrInfo *TII, const DebugLoc &dl, |
| 10515 | unsigned LdSize, unsigned Data, unsigned AddrIn, |
| 10516 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
| 10517 | unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2); |
  assert(LdOpc != 0 && "Should have a load opcode");
| 10519 | if (LdSize >= 8) { |
| 10520 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) |
| 10521 | .addReg(AddrOut, RegState::Define) |
| 10522 | .addReg(AddrIn) |
| 10523 | .addImm(0) |
| 10524 | .add(predOps(ARMCC::AL)); |
| 10525 | } else if (IsThumb1) { |
    // Thumb1 has no post-indexed load: load first, then update AddrIn.
| 10527 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) |
| 10528 | .addReg(AddrIn) |
| 10529 | .addImm(0) |
| 10530 | .add(predOps(ARMCC::AL)); |
| 10531 | BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut) |
| 10532 | .add(t1CondCodeOp()) |
| 10533 | .addReg(AddrIn) |
| 10534 | .addImm(LdSize) |
| 10535 | .add(predOps(ARMCC::AL)); |
| 10536 | } else if (IsThumb2) { |
| 10537 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) |
| 10538 | .addReg(AddrOut, RegState::Define) |
| 10539 | .addReg(AddrIn) |
| 10540 | .addImm(LdSize) |
| 10541 | .add(predOps(ARMCC::AL)); |
| 10542 | } else { // arm |
| 10543 | BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) |
| 10544 | .addReg(AddrOut, RegState::Define) |
| 10545 | .addReg(AddrIn) |
| 10546 | .addReg(0) |
| 10547 | .addImm(LdSize) |
| 10548 | .add(predOps(ARMCC::AL)); |
| 10549 | } |
| 10550 | } |
| 10551 | |
| 10552 | /// Emit a post-increment store operation with given size. The instructions |
| 10553 | /// will be added to BB at Pos. |
| 10554 | static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, |
| 10555 | const TargetInstrInfo *TII, const DebugLoc &dl, |
| 10556 | unsigned StSize, unsigned Data, unsigned AddrIn, |
| 10557 | unsigned AddrOut, bool IsThumb1, bool IsThumb2) { |
| 10558 | unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2); |
  assert(StOpc != 0 && "Should have a store opcode");
| 10560 | if (StSize >= 8) { |
| 10561 | BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) |
| 10562 | .addReg(AddrIn) |
| 10563 | .addImm(0) |
| 10564 | .addReg(Data) |
| 10565 | .add(predOps(ARMCC::AL)); |
| 10566 | } else if (IsThumb1) { |
    // Thumb1 has no post-indexed store: store first, then update AddrIn.
| 10568 | BuildMI(*BB, Pos, dl, TII->get(StOpc)) |
| 10569 | .addReg(Data) |
| 10570 | .addReg(AddrIn) |
| 10571 | .addImm(0) |
| 10572 | .add(predOps(ARMCC::AL)); |
| 10573 | BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut) |
| 10574 | .add(t1CondCodeOp()) |
| 10575 | .addReg(AddrIn) |
| 10576 | .addImm(StSize) |
| 10577 | .add(predOps(ARMCC::AL)); |
| 10578 | } else if (IsThumb2) { |
| 10579 | BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) |
| 10580 | .addReg(Data) |
| 10581 | .addReg(AddrIn) |
| 10582 | .addImm(StSize) |
| 10583 | .add(predOps(ARMCC::AL)); |
| 10584 | } else { // arm |
| 10585 | BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) |
| 10586 | .addReg(Data) |
| 10587 | .addReg(AddrIn) |
| 10588 | .addReg(0) |
| 10589 | .addImm(StSize) |
| 10590 | .add(predOps(ARMCC::AL)); |
| 10591 | } |
| 10592 | } |
| 10593 | |
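/// Expand a struct byval copy pseudo. Copies up to the inline size threshold
/// are fully unrolled into post-incrementing load/store pairs; larger copies
/// become a copy loop with a scalar byte-copy epilogue for any size left over
/// after the widest unit.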
| 10594 | MachineBasicBlock * |
| 10595 | ARMTargetLowering::EmitStructByval(MachineInstr &MI, |
| 10596 | MachineBasicBlock *BB) const { |
  // This pseudo instruction has 4 operands: dst, src, size, alignment.
| 10598 | // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold(). |
| 10599 | // Otherwise, we will generate unrolled scalar copies. |
| 10600 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 10601 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 10602 | MachineFunction::iterator It = ++BB->getIterator(); |
| 10603 | |
| 10604 | Register dest = MI.getOperand(0).getReg(); |
| 10605 | Register src = MI.getOperand(1).getReg(); |
| 10606 | unsigned SizeVal = MI.getOperand(2).getImm(); |
| 10607 | unsigned Alignment = MI.getOperand(3).getImm(); |
| 10608 | DebugLoc dl = MI.getDebugLoc(); |
| 10609 | |
| 10610 | MachineFunction *MF = BB->getParent(); |
| 10611 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 10612 | unsigned UnitSize = 0; |
| 10613 | const TargetRegisterClass *TRC = nullptr; |
| 10614 | const TargetRegisterClass *VecTRC = nullptr; |
| 10615 | |
| 10616 | bool IsThumb1 = Subtarget->isThumb1Only(); |
| 10617 | bool IsThumb2 = Subtarget->isThumb2(); |
| 10618 | bool IsThumb = Subtarget->isThumb(); |
| 10619 | |
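  // Choose the widest copy unit the alignment allows: bytes for 1-byte
  // alignment, halfwords for 2-byte alignment, 8- or 16-byte NEON units when
  // NEON is available and the copy is large enough, and words otherwise.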
| 10620 | if (Alignment & 1) { |
| 10621 | UnitSize = 1; |
| 10622 | } else if (Alignment & 2) { |
| 10623 | UnitSize = 2; |
| 10624 | } else { |
| 10625 | // Check whether we can use NEON instructions. |
| 10626 | if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) && |
| 10627 | Subtarget->hasNEON()) { |
| 10628 | if ((Alignment % 16 == 0) && SizeVal >= 16) |
| 10629 | UnitSize = 16; |
| 10630 | else if ((Alignment % 8 == 0) && SizeVal >= 8) |
| 10631 | UnitSize = 8; |
| 10632 | } |
| 10633 | // Can't use NEON instructions. |
| 10634 | if (UnitSize == 0) |
| 10635 | UnitSize = 4; |
| 10636 | } |
| 10637 | |
| 10638 | // Select the correct opcode and register class for unit size load/store |
| 10639 | bool IsNeon = UnitSize >= 8; |
| 10640 | TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass; |
| 10641 | if (IsNeon) |
| 10642 | VecTRC = UnitSize == 16 ? &ARM::DPairRegClass |
| 10643 | : UnitSize == 8 ? &ARM::DPRRegClass |
| 10644 | : nullptr; |
| 10645 | |
| 10646 | unsigned BytesLeft = SizeVal % UnitSize; |
| 10647 | unsigned LoopSize = SizeVal - BytesLeft; |
| 10648 | |
| 10649 | if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { |
| 10650 | // Use LDR and STR to copy. |
| 10651 | // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) |
| 10652 | // [destOut] = STR_POST(scratch, destIn, UnitSize) |
| 10653 | unsigned srcIn = src; |
| 10654 | unsigned destIn = dest; |
| 10655 | for (unsigned i = 0; i < LoopSize; i+=UnitSize) { |
| 10656 | Register srcOut = MRI.createVirtualRegister(TRC); |
| 10657 | Register destOut = MRI.createVirtualRegister(TRC); |
| 10658 | Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); |
| 10659 | emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut, |
| 10660 | IsThumb1, IsThumb2); |
| 10661 | emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut, |
| 10662 | IsThumb1, IsThumb2); |
| 10663 | srcIn = srcOut; |
| 10664 | destIn = destOut; |
| 10665 | } |
| 10666 | |
| 10667 | // Handle the leftover bytes with LDRB and STRB. |
| 10668 | // [scratch, srcOut] = LDRB_POST(srcIn, 1) |
| 10669 | // [destOut] = STRB_POST(scratch, destIn, 1) |
| 10670 | for (unsigned i = 0; i < BytesLeft; i++) { |
| 10671 | Register srcOut = MRI.createVirtualRegister(TRC); |
| 10672 | Register destOut = MRI.createVirtualRegister(TRC); |
| 10673 | Register scratch = MRI.createVirtualRegister(TRC); |
| 10674 | emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut, |
| 10675 | IsThumb1, IsThumb2); |
| 10676 | emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut, |
| 10677 | IsThumb1, IsThumb2); |
| 10678 | srcIn = srcOut; |
| 10679 | destIn = destOut; |
| 10680 | } |
| 10681 | MI.eraseFromParent(); // The instruction is gone now. |
| 10682 | return BB; |
| 10683 | } |
| 10684 | |
| 10685 | // Expand the pseudo op to a loop. |
| 10686 | // thisMBB: |
| 10687 | // ... |
| 10688 | // movw varEnd, # --> with thumb2 |
| 10689 | // movt varEnd, # |
| 10690 | // ldrcp varEnd, idx --> without thumb2 |
| 10691 | // fallthrough --> loopMBB |
| 10692 | // loopMBB: |
| 10693 | // PHI varPhi, varEnd, varLoop |
| 10694 | // PHI srcPhi, src, srcLoop |
| 10695 | // PHI destPhi, dst, destLoop |
| 10696 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) |
| 10697 | // [destLoop] = STR_POST(scratch, destPhi, UnitSize) |
| 10698 | // subs varLoop, varPhi, #UnitSize |
| 10699 | // bne loopMBB |
| 10700 | // fallthrough --> exitMBB |
| 10701 | // exitMBB: |
| 10702 | // epilogue to handle left-over bytes |
| 10703 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
| 10704 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
| 10705 | MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); |
| 10706 | MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); |
| 10707 | MF->insert(It, loopMBB); |
| 10708 | MF->insert(It, exitMBB); |
| 10709 | |
| 10710 | // Transfer the remainder of BB and its successor edges to exitMBB. |
| 10711 | exitMBB->splice(exitMBB->begin(), BB, |
| 10712 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); |
| 10713 | exitMBB->transferSuccessorsAndUpdatePHIs(BB); |
| 10714 | |
| 10715 | // Load an immediate to varEnd. |
| 10716 | Register varEnd = MRI.createVirtualRegister(TRC); |
| 10717 | if (Subtarget->useMovt()) { |
| 10718 | unsigned Vtmp = varEnd; |
| 10719 | if ((LoopSize & 0xFFFF0000) != 0) |
| 10720 | Vtmp = MRI.createVirtualRegister(TRC); |
| 10721 | BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp) |
| 10722 | .addImm(LoopSize & 0xFFFF) |
| 10723 | .add(predOps(ARMCC::AL)); |
| 10724 | |
| 10725 | if ((LoopSize & 0xFFFF0000) != 0) |
| 10726 | BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd) |
| 10727 | .addReg(Vtmp) |
| 10728 | .addImm(LoopSize >> 16) |
| 10729 | .add(predOps(ARMCC::AL)); |
| 10730 | } else { |
| 10731 | MachineConstantPool *ConstantPool = MF->getConstantPool(); |
| 10732 | Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext()); |
| 10733 | const Constant *C = ConstantInt::get(Int32Ty, LoopSize); |
| 10734 | |
| 10735 | // MachineConstantPool wants an explicit alignment. |
| 10736 | Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty); |
| 10737 | unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment); |
| 10738 | MachineMemOperand *CPMMO = |
| 10739 | MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), |
| 10740 | MachineMemOperand::MOLoad, 4, Align(4)); |
| 10741 | |
| 10742 | if (IsThumb) |
| 10743 | BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci)) |
| 10744 | .addReg(varEnd, RegState::Define) |
| 10745 | .addConstantPoolIndex(Idx) |
| 10746 | .add(predOps(ARMCC::AL)) |
| 10747 | .addMemOperand(CPMMO); |
| 10748 | else |
| 10749 | BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp)) |
| 10750 | .addReg(varEnd, RegState::Define) |
| 10751 | .addConstantPoolIndex(Idx) |
| 10752 | .addImm(0) |
| 10753 | .add(predOps(ARMCC::AL)) |
| 10754 | .addMemOperand(CPMMO); |
| 10755 | } |
| 10756 | BB->addSuccessor(loopMBB); |
| 10757 | |
| 10758 | // Generate the loop body: |
| 10759 | // varPhi = PHI(varLoop, varEnd) |
| 10760 | // srcPhi = PHI(srcLoop, src) |
| 10761 | // destPhi = PHI(destLoop, dst) |
| 10762 | MachineBasicBlock *entryBB = BB; |
| 10763 | BB = loopMBB; |
| 10764 | Register varLoop = MRI.createVirtualRegister(TRC); |
| 10765 | Register varPhi = MRI.createVirtualRegister(TRC); |
| 10766 | Register srcLoop = MRI.createVirtualRegister(TRC); |
| 10767 | Register srcPhi = MRI.createVirtualRegister(TRC); |
| 10768 | Register destLoop = MRI.createVirtualRegister(TRC); |
| 10769 | Register destPhi = MRI.createVirtualRegister(TRC); |
| 10770 | |
| 10771 | BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi) |
| 10772 | .addReg(varLoop).addMBB(loopMBB) |
| 10773 | .addReg(varEnd).addMBB(entryBB); |
| 10774 | BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi) |
| 10775 | .addReg(srcLoop).addMBB(loopMBB) |
| 10776 | .addReg(src).addMBB(entryBB); |
| 10777 | BuildMI(BB, dl, TII->get(ARM::PHI), destPhi) |
| 10778 | .addReg(destLoop).addMBB(loopMBB) |
| 10779 | .addReg(dest).addMBB(entryBB); |
| 10780 | |
| 10781 | // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) |
  // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
| 10783 | Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); |
| 10784 | emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop, |
| 10785 | IsThumb1, IsThumb2); |
| 10786 | emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop, |
| 10787 | IsThumb1, IsThumb2); |
| 10788 | |
| 10789 | // Decrement loop variable by UnitSize. |
| 10790 | if (IsThumb1) { |
| 10791 | BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop) |
| 10792 | .add(t1CondCodeOp()) |
| 10793 | .addReg(varPhi) |
| 10794 | .addImm(UnitSize) |
| 10795 | .add(predOps(ARMCC::AL)); |
| 10796 | } else { |
| 10797 | MachineInstrBuilder MIB = |
| 10798 | BuildMI(*BB, BB->end(), dl, |
| 10799 | TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop); |
| 10800 | MIB.addReg(varPhi) |
| 10801 | .addImm(UnitSize) |
| 10802 | .add(predOps(ARMCC::AL)) |
| 10803 | .add(condCodeOp()); |
| 10804 | MIB->getOperand(5).setReg(ARM::CPSR); |
| 10805 | MIB->getOperand(5).setIsDef(true); |
| 10806 | } |
| 10807 | BuildMI(*BB, BB->end(), dl, |
| 10808 | TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
| 10809 | .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); |
| 10810 | |
| 10811 | // loopMBB can loop back to loopMBB or fall through to exitMBB. |
| 10812 | BB->addSuccessor(loopMBB); |
| 10813 | BB->addSuccessor(exitMBB); |
| 10814 | |
| 10815 | // Add epilogue to handle BytesLeft. |
| 10816 | BB = exitMBB; |
| 10817 | auto StartOfExit = exitMBB->begin(); |
| 10818 | |
| 10819 | // [scratch, srcOut] = LDRB_POST(srcLoop, 1) |
| 10820 | // [destOut] = STRB_POST(scratch, destLoop, 1) |
| 10821 | unsigned srcIn = srcLoop; |
| 10822 | unsigned destIn = destLoop; |
| 10823 | for (unsigned i = 0; i < BytesLeft; i++) { |
| 10824 | Register srcOut = MRI.createVirtualRegister(TRC); |
| 10825 | Register destOut = MRI.createVirtualRegister(TRC); |
| 10826 | Register scratch = MRI.createVirtualRegister(TRC); |
| 10827 | emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut, |
| 10828 | IsThumb1, IsThumb2); |
| 10829 | emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut, |
| 10830 | IsThumb1, IsThumb2); |
| 10831 | srcIn = srcOut; |
| 10832 | destIn = destOut; |
| 10833 | } |
| 10834 | |
| 10835 | MI.eraseFromParent(); // The instruction is gone now. |
| 10836 | return BB; |
| 10837 | } |
| 10838 | |
| 10839 | MachineBasicBlock * |
| 10840 | ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI, |
| 10841 | MachineBasicBlock *MBB) const { |
| 10842 | const TargetMachine &TM = getTargetMachine(); |
| 10843 | const TargetInstrInfo &TII = *Subtarget->getInstrInfo(); |
| 10844 | DebugLoc DL = MI.getDebugLoc(); |
| 10845 | |
  assert(Subtarget->isTargetWindows() &&
         "__chkstk is only supported on Windows");
  assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
| 10849 | |
| 10850 | // __chkstk takes the number of words to allocate on the stack in R4, and |
| 10851 | // returns the stack adjustment in number of bytes in R4. This will not |
  // clobber any other registers (other than the obvious lr).
| 10853 | // |
| 10854 | // Although, technically, IP should be considered a register which may be |
| 10855 | // clobbered, the call itself will not touch it. Windows on ARM is a pure |
| 10856 | // thumb-2 environment, so there is no interworking required. As a result, we |
| 10857 | // do not expect a veneer to be emitted by the linker, clobbering IP. |
| 10858 | // |
| 10859 | // Each module receives its own copy of __chkstk, so no import thunk is |
| 10860 | // required, again, ensuring that IP is not clobbered. |
| 10861 | // |
| 10862 | // Finally, although some linkers may theoretically provide a trampoline for |
  // out-of-range calls (which is quite common due to a 32M range limitation of
| 10864 | // branches for Thumb), we can generate the long-call version via |
| 10865 | // -mcmodel=large, alleviating the need for the trampoline which may clobber |
| 10866 | // IP. |
| 10867 | |
| 10868 | switch (TM.getCodeModel()) { |
| 10869 | case CodeModel::Tiny: |
| 10870 | llvm_unreachable("Tiny code model not available on ARM." ); |
| 10871 | case CodeModel::Small: |
| 10872 | case CodeModel::Medium: |
| 10873 | case CodeModel::Kernel: |
| 10874 | BuildMI(*MBB, MI, DL, TII.get(ARM::tBL)) |
| 10875 | .add(predOps(ARMCC::AL)) |
        .addExternalSymbol("__chkstk")
| 10877 | .addReg(ARM::R4, RegState::Implicit | RegState::Kill) |
| 10878 | .addReg(ARM::R4, RegState::Implicit | RegState::Define) |
| 10879 | .addReg(ARM::R12, |
| 10880 | RegState::Implicit | RegState::Define | RegState::Dead) |
| 10881 | .addReg(ARM::CPSR, |
| 10882 | RegState::Implicit | RegState::Define | RegState::Dead); |
| 10883 | break; |
| 10884 | case CodeModel::Large: { |
| 10885 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); |
| 10886 | Register Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass); |
| 10887 | |
| 10888 | BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg) |
        .addExternalSymbol("__chkstk");
| 10890 | BuildMI(*MBB, MI, DL, TII.get(gettBLXrOpcode(*MBB->getParent()))) |
| 10891 | .add(predOps(ARMCC::AL)) |
| 10892 | .addReg(Reg, RegState::Kill) |
| 10893 | .addReg(ARM::R4, RegState::Implicit | RegState::Kill) |
| 10894 | .addReg(ARM::R4, RegState::Implicit | RegState::Define) |
| 10895 | .addReg(ARM::R12, |
| 10896 | RegState::Implicit | RegState::Define | RegState::Dead) |
| 10897 | .addReg(ARM::CPSR, |
| 10898 | RegState::Implicit | RegState::Define | RegState::Dead); |
| 10899 | break; |
| 10900 | } |
| 10901 | } |
| 10902 | |
| 10903 | BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP) |
| 10904 | .addReg(ARM::SP, RegState::Kill) |
| 10905 | .addReg(ARM::R4, RegState::Kill) |
| 10906 | .setMIFlags(MachineInstr::FrameSetup) |
| 10907 | .add(predOps(ARMCC::AL)) |
| 10908 | .add(condCodeOp()); |
| 10909 | |
| 10910 | MI.eraseFromParent(); |
| 10911 | return MBB; |
| 10912 | } |
| 10913 | |
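/// Expand the WIN__DBZCHK pseudo: compare the divisor against zero and branch
/// to a trap block that raises the Windows division-by-zero breakpoint
/// (__brkdiv0) if it is zero; otherwise fall through to the continuation
/// block.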
| 10914 | MachineBasicBlock * |
| 10915 | ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI, |
| 10916 | MachineBasicBlock *MBB) const { |
| 10917 | DebugLoc DL = MI.getDebugLoc(); |
| 10918 | MachineFunction *MF = MBB->getParent(); |
| 10919 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 10920 | |
| 10921 | MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock(); |
| 10922 | MF->insert(++MBB->getIterator(), ContBB); |
| 10923 | ContBB->splice(ContBB->begin(), MBB, |
| 10924 | std::next(MachineBasicBlock::iterator(MI)), MBB->end()); |
| 10925 | ContBB->transferSuccessorsAndUpdatePHIs(MBB); |
| 10926 | MBB->addSuccessor(ContBB); |
| 10927 | |
| 10928 | MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); |
| 10929 | BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0)); |
| 10930 | MF->push_back(TrapBB); |
| 10931 | MBB->addSuccessor(TrapBB); |
| 10932 | |
| 10933 | BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8)) |
| 10934 | .addReg(MI.getOperand(0).getReg()) |
| 10935 | .addImm(0) |
| 10936 | .add(predOps(ARMCC::AL)); |
| 10937 | BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc)) |
| 10938 | .addMBB(TrapBB) |
| 10939 | .addImm(ARMCC::EQ) |
| 10940 | .addReg(ARM::CPSR); |
| 10941 | |
| 10942 | MI.eraseFromParent(); |
| 10943 | return ContBB; |
| 10944 | } |
| 10945 | |
| 10946 | // The CPSR operand of SelectItr might be missing a kill marker |
| 10947 | // because there were multiple uses of CPSR, and ISel didn't know |
| 10948 | // which to mark. Figure out whether SelectItr should have had a |
| 10949 | // kill marker, and set it if it should. Returns the correct kill |
| 10950 | // marker value. |
| 10951 | static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, |
| 10952 | MachineBasicBlock* BB, |
| 10953 | const TargetRegisterInfo* TRI) { |
| 10954 | // Scan forward through BB for a use/def of CPSR. |
| 10955 | MachineBasicBlock::iterator miI(std::next(SelectItr)); |
| 10956 | for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { |
| 10957 | const MachineInstr& mi = *miI; |
| 10958 | if (mi.readsRegister(ARM::CPSR)) |
| 10959 | return false; |
| 10960 | if (mi.definesRegister(ARM::CPSR)) |
| 10961 | break; // Should have kill-flag - update below. |
| 10962 | } |
| 10963 | |
| 10964 | // If we hit the end of the block, check whether CPSR is live into a |
| 10965 | // successor. |
| 10966 | if (miI == BB->end()) { |
| 10967 | for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(), |
| 10968 | sEnd = BB->succ_end(); |
| 10969 | sItr != sEnd; ++sItr) { |
| 10970 | MachineBasicBlock* succ = *sItr; |
| 10971 | if (succ->isLiveIn(ARM::CPSR)) |
| 10972 | return false; |
| 10973 | } |
| 10974 | } |
| 10975 | |
// We found a def, or hit the end of the basic block and CPSR wasn't live
// out. SelectItr should have a kill flag on CPSR.
| 10978 | SelectItr->addRegisterKilled(ARM::CPSR, TRI); |
| 10979 | return true; |
| 10980 | } |
| 10981 | |
| 10982 | MachineBasicBlock * |
| 10983 | ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, |
| 10984 | MachineBasicBlock *BB) const { |
| 10985 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 10986 | DebugLoc dl = MI.getDebugLoc(); |
| 10987 | bool isThumb2 = Subtarget->isThumb2(); |
| 10988 | switch (MI.getOpcode()) { |
| 10989 | default: { |
| 10990 | MI.print(errs()); |
| 10991 | llvm_unreachable("Unexpected instr type to insert" ); |
| 10992 | } |
| 10993 | |
| 10994 | // Thumb1 post-indexed loads are really just single-register LDMs. |
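// For example, "ldr r0, [r1], #4" behaves exactly like "ldm r1!, {r0}":
// both load r0 from [r1] and then increment r1 by 4.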
| 10995 | case ARM::tLDR_postidx: { |
| 10996 | MachineOperand Def(MI.getOperand(1)); |
| 10997 | BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD)) |
| 10998 | .add(Def) // Rn_wb |
| 10999 | .add(MI.getOperand(2)) // Rn |
| 11000 | .add(MI.getOperand(3)) // PredImm |
| 11001 | .add(MI.getOperand(4)) // PredReg |
| 11002 | .add(MI.getOperand(0)) // Rt |
| 11003 | .cloneMemRefs(MI); |
| 11004 | MI.eraseFromParent(); |
| 11005 | return BB; |
| 11006 | } |
| 11007 | |
// The Thumb2 pre-indexed stores have the same MI operands; they just
// define them differently in the .td files from the isel patterns, so
// they need pseudos.
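// For example, "str r0, [r1, #4]!" stores r0 to r1+4 and then writes the
// incremented address back into r1.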
| 11011 | case ARM::t2STR_preidx: |
| 11012 | MI.setDesc(TII->get(ARM::t2STR_PRE)); |
| 11013 | return BB; |
| 11014 | case ARM::t2STRB_preidx: |
| 11015 | MI.setDesc(TII->get(ARM::t2STRB_PRE)); |
| 11016 | return BB; |
| 11017 | case ARM::t2STRH_preidx: |
| 11018 | MI.setDesc(TII->get(ARM::t2STRH_PRE)); |
| 11019 | return BB; |
| 11020 | |
| 11021 | case ARM::STRi_preidx: |
| 11022 | case ARM::STRBi_preidx: { |
| 11023 | unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM |
| 11024 | : ARM::STRB_PRE_IMM; |
| 11025 | // Decode the offset. |
| 11026 | unsigned Offset = MI.getOperand(4).getImm(); |
| 11027 | bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; |
| 11028 | Offset = ARM_AM::getAM2Offset(Offset); |
| 11029 | if (isSub) |
| 11030 | Offset = -Offset; |
| 11031 | |
| 11032 | MachineMemOperand *MMO = *MI.memoperands_begin(); |
| 11033 | BuildMI(*BB, MI, dl, TII->get(NewOpc)) |
| 11034 | .add(MI.getOperand(0)) // Rn_wb |
| 11035 | .add(MI.getOperand(1)) // Rt |
| 11036 | .add(MI.getOperand(2)) // Rn |
| 11037 | .addImm(Offset) // offset (skip GPR==zero_reg) |
| 11038 | .add(MI.getOperand(5)) // pred |
| 11039 | .add(MI.getOperand(6)) |
| 11040 | .addMemOperand(MMO); |
| 11041 | MI.eraseFromParent(); |
| 11042 | return BB; |
| 11043 | } |
| 11044 | case ARM::STRr_preidx: |
| 11045 | case ARM::STRBr_preidx: |
| 11046 | case ARM::STRH_preidx: { |
| 11047 | unsigned NewOpc; |
| 11048 | switch (MI.getOpcode()) { |
default: llvm_unreachable("unexpected opcode!");
| 11050 | case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; |
| 11051 | case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; |
| 11052 | case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; |
| 11053 | } |
| 11054 | MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); |
| 11055 | for (unsigned i = 0; i < MI.getNumOperands(); ++i) |
| 11056 | MIB.add(MI.getOperand(i)); |
| 11057 | MI.eraseFromParent(); |
| 11058 | return BB; |
| 11059 | } |
| 11060 | |
| 11061 | case ARM::tMOVCCr_pseudo: { |
| 11062 | // To "insert" a SELECT_CC instruction, we actually have to insert the |
| 11063 | // diamond control-flow pattern. The incoming instruction knows the |
| 11064 | // destination vreg to set, the condition code register to branch on, the |
| 11065 | // true/false values to select between, and a branch opcode to use. |
| 11066 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 11067 | MachineFunction::iterator It = ++BB->getIterator(); |
| 11068 | |
| 11069 | // thisMBB: |
| 11070 | // ... |
| 11071 | // TrueVal = ... |
| 11072 | // cmpTY ccX, r1, r2 |
| 11073 | // bCC copy1MBB |
| 11074 | // fallthrough --> copy0MBB |
| 11075 | MachineBasicBlock *thisMBB = BB; |
| 11076 | MachineFunction *F = BB->getParent(); |
| 11077 | MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); |
| 11078 | MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); |
| 11079 | F->insert(It, copy0MBB); |
| 11080 | F->insert(It, sinkMBB); |
| 11081 | |
| 11082 | // Check whether CPSR is live past the tMOVCCr_pseudo. |
| 11083 | const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 11084 | if (!MI.killsRegister(ARM::CPSR) && |
| 11085 | !checkAndUpdateCPSRKill(MI, thisMBB, TRI)) { |
| 11086 | copy0MBB->addLiveIn(ARM::CPSR); |
| 11087 | sinkMBB->addLiveIn(ARM::CPSR); |
| 11088 | } |
| 11089 | |
| 11090 | // Transfer the remainder of BB and its successor edges to sinkMBB. |
| 11091 | sinkMBB->splice(sinkMBB->begin(), BB, |
| 11092 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); |
| 11093 | sinkMBB->transferSuccessorsAndUpdatePHIs(BB); |
| 11094 | |
| 11095 | BB->addSuccessor(copy0MBB); |
| 11096 | BB->addSuccessor(sinkMBB); |
| 11097 | |
| 11098 | BuildMI(BB, dl, TII->get(ARM::tBcc)) |
| 11099 | .addMBB(sinkMBB) |
| 11100 | .addImm(MI.getOperand(3).getImm()) |
| 11101 | .addReg(MI.getOperand(4).getReg()); |
| 11102 | |
| 11103 | // copy0MBB: |
| 11104 | // %FalseValue = ... |
| 11105 | // # fallthrough to sinkMBB |
| 11106 | BB = copy0MBB; |
| 11107 | |
| 11108 | // Update machine-CFG edges |
| 11109 | BB->addSuccessor(sinkMBB); |
| 11110 | |
| 11111 | // sinkMBB: |
| 11112 | // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] |
| 11113 | // ... |
| 11114 | BB = sinkMBB; |
| 11115 | BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg()) |
| 11116 | .addReg(MI.getOperand(1).getReg()) |
| 11117 | .addMBB(copy0MBB) |
| 11118 | .addReg(MI.getOperand(2).getReg()) |
| 11119 | .addMBB(thisMBB); |
| 11120 | |
| 11121 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
| 11122 | return BB; |
| 11123 | } |
| 11124 | |
| 11125 | case ARM::BCCi64: |
| 11126 | case ARM::BCCZi64: { |
| 11127 | // If there is an unconditional branch to the other successor, remove it. |
| 11128 | BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end()); |
| 11129 | |
| 11130 | // Compare both parts that make up the double comparison separately for |
| 11131 | // equality. |
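// The second compare below is predicated on EQ, so its flags are only
// written when the first halves compared equal; the final branch therefore
// tests full 64-bit equality.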
| 11132 | bool RHSisZero = MI.getOpcode() == ARM::BCCZi64; |
| 11133 | |
| 11134 | Register LHS1 = MI.getOperand(1).getReg(); |
| 11135 | Register LHS2 = MI.getOperand(2).getReg(); |
| 11136 | if (RHSisZero) { |
| 11137 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
| 11138 | .addReg(LHS1) |
| 11139 | .addImm(0) |
| 11140 | .add(predOps(ARMCC::AL)); |
| 11141 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
| 11142 | .addReg(LHS2).addImm(0) |
| 11143 | .addImm(ARMCC::EQ).addReg(ARM::CPSR); |
| 11144 | } else { |
| 11145 | Register RHS1 = MI.getOperand(3).getReg(); |
| 11146 | Register RHS2 = MI.getOperand(4).getReg(); |
| 11147 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
| 11148 | .addReg(LHS1) |
| 11149 | .addReg(RHS1) |
| 11150 | .add(predOps(ARMCC::AL)); |
| 11151 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) |
| 11152 | .addReg(LHS2).addReg(RHS2) |
| 11153 | .addImm(ARMCC::EQ).addReg(ARM::CPSR); |
| 11154 | } |
| 11155 | |
| 11156 | MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB(); |
| 11157 | MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); |
| 11158 | if (MI.getOperand(0).getImm() == ARMCC::NE) |
| 11159 | std::swap(destMBB, exitMBB); |
| 11160 | |
| 11161 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) |
| 11162 | .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); |
| 11163 | if (isThumb2) |
| 11164 | BuildMI(BB, dl, TII->get(ARM::t2B)) |
| 11165 | .addMBB(exitMBB) |
| 11166 | .add(predOps(ARMCC::AL)); |
| 11167 | else |
BuildMI(BB, dl, TII->get(ARM::B)).addMBB(exitMBB);
| 11169 | |
| 11170 | MI.eraseFromParent(); // The pseudo instruction is gone now. |
| 11171 | return BB; |
| 11172 | } |
| 11173 | |
| 11174 | case ARM::Int_eh_sjlj_setjmp: |
| 11175 | case ARM::Int_eh_sjlj_setjmp_nofp: |
| 11176 | case ARM::tInt_eh_sjlj_setjmp: |
| 11177 | case ARM::t2Int_eh_sjlj_setjmp: |
| 11178 | case ARM::t2Int_eh_sjlj_setjmp_nofp: |
| 11179 | return BB; |
| 11180 | |
| 11181 | case ARM::Int_eh_sjlj_setup_dispatch: |
| 11182 | EmitSjLjDispatchBlock(MI, BB); |
| 11183 | return BB; |
| 11184 | |
| 11185 | case ARM::ABS: |
| 11186 | case ARM::t2ABS: { |
| 11187 | // To insert an ABS instruction, we have to insert the |
| 11188 | // diamond control-flow pattern. The incoming instruction knows the |
| 11189 | // source vreg to test against 0, the destination vreg to set, |
| 11190 | // the condition code register to branch on, the |
| 11191 | // true/false values to select between, and a branch opcode to use. |
// It transforms
//     V1 = ABS V0
// into
//     CMP V0, #0               (set condition flags on V0)
//     BCC                      (branch to SinkBB if V0 >= 0)
//     RSBBB: V2 = RSBri V0, 0  (compute ABS if V0 < 0)
//     SinkBB: V1 = PHI(V0, V2)
| 11199 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); |
| 11200 | MachineFunction::iterator BBI = ++BB->getIterator(); |
| 11201 | MachineFunction *Fn = BB->getParent(); |
| 11202 | MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); |
| 11203 | MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); |
| 11204 | Fn->insert(BBI, RSBBB); |
| 11205 | Fn->insert(BBI, SinkBB); |
| 11206 | |
| 11207 | Register ABSSrcReg = MI.getOperand(1).getReg(); |
| 11208 | Register ABSDstReg = MI.getOperand(0).getReg(); |
bool ABSSrcKill = MI.getOperand(1).isKill();
| 11210 | bool isThumb2 = Subtarget->isThumb2(); |
| 11211 | MachineRegisterInfo &MRI = Fn->getRegInfo(); |
// In Thumb mode, the S bit must not be specified if the source register is
// SP or PC, or if the destination register is SP, so restrict the register class.
| 11214 | Register NewRsbDstReg = MRI.createVirtualRegister( |
| 11215 | isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass); |
| 11216 | |
// Transfer the remainder of BB and its successor edges to SinkBB.
| 11218 | SinkBB->splice(SinkBB->begin(), BB, |
| 11219 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); |
| 11220 | SinkBB->transferSuccessorsAndUpdatePHIs(BB); |
| 11221 | |
| 11222 | BB->addSuccessor(RSBBB); |
| 11223 | BB->addSuccessor(SinkBB); |
| 11224 | |
// fall through to SinkBB
| 11226 | RSBBB->addSuccessor(SinkBB); |
| 11227 | |
| 11228 | // insert a cmp at the end of BB |
| 11229 | BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) |
| 11230 | .addReg(ABSSrcReg) |
| 11231 | .addImm(0) |
| 11232 | .add(predOps(ARMCC::AL)); |
| 11233 | |
| 11234 | // insert a bcc with opposite CC to ARMCC::MI at the end of BB |
| 11235 | BuildMI(BB, dl, |
| 11236 | TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) |
| 11237 | .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); |
| 11238 | |
| 11239 | // insert rsbri in RSBBB |
| 11240 | // Note: BCC and rsbri will be converted into predicated rsbmi |
| 11241 | // by if-conversion pass |
| 11242 | BuildMI(*RSBBB, RSBBB->begin(), dl, |
| 11243 | TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) |
.addReg(ABSSrcReg, ABSSrcKill ? RegState::Kill : 0)
| 11245 | .addImm(0) |
| 11246 | .add(predOps(ARMCC::AL)) |
| 11247 | .add(condCodeOp()); |
| 11248 | |
| 11249 | // insert PHI in SinkBB, |
| 11250 | // reuse ABSDstReg to not change uses of ABS instruction |
| 11251 | BuildMI(*SinkBB, SinkBB->begin(), dl, |
| 11252 | TII->get(ARM::PHI), ABSDstReg) |
| 11253 | .addReg(NewRsbDstReg).addMBB(RSBBB) |
| 11254 | .addReg(ABSSrcReg).addMBB(BB); |
| 11255 | |
| 11256 | // remove ABS instruction |
| 11257 | MI.eraseFromParent(); |
| 11258 | |
| 11259 | // return last added BB |
| 11260 | return SinkBB; |
| 11261 | } |
| 11262 | case ARM::COPY_STRUCT_BYVAL_I32: |
| 11263 | ++NumLoopByVals; |
| 11264 | return EmitStructByval(MI, BB); |
| 11265 | case ARM::WIN__CHKSTK: |
| 11266 | return EmitLowered__chkstk(MI, BB); |
| 11267 | case ARM::WIN__DBZCHK: |
| 11268 | return EmitLowered__dbzchk(MI, BB); |
| 11269 | case ARM::t2DoLoopStart: |
// We are just here to set a register allocation hint, preferring lr for the
// input register to make it more likely to be movable and removable later
// in the pipeline.
| 11273 | Register R = MI.getOperand(1).getReg(); |
| 11274 | MachineFunction *MF = MI.getParent()->getParent(); |
| 11275 | MF->getRegInfo().setRegAllocationHint(R, ARMRI::RegLR, 0); |
| 11276 | return BB; |
| 11277 | } |
| 11278 | } |
| 11279 | |
| 11280 | /// Attaches vregs to MEMCPY that it will use as scratch registers |
| 11281 | /// when it is expanded into LDM/STM. This is done as a post-isel lowering |
| 11282 | /// instead of as a custom inserter because we need the use list from the SDNode. |
| 11283 | static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, |
| 11284 | MachineInstr &MI, const SDNode *Node) { |
| 11285 | bool isThumb1 = Subtarget->isThumb1Only(); |
| 11286 | |
| 11287 | DebugLoc DL = MI.getDebugLoc(); |
| 11288 | MachineFunction *MF = MI.getParent()->getParent(); |
| 11289 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 11290 | MachineInstrBuilder MIB(*MF, MI); |
| 11291 | |
| 11292 | // If the new dst/src is unused mark it as dead. |
| 11293 | if (!Node->hasAnyUseOfValue(0)) { |
| 11294 | MI.getOperand(0).setIsDead(true); |
| 11295 | } |
| 11296 | if (!Node->hasAnyUseOfValue(1)) { |
| 11297 | MI.getOperand(1).setIsDead(true); |
| 11298 | } |
| 11299 | |
| 11300 | // The MEMCPY both defines and kills the scratch registers. |
| 11301 | for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) { |
| 11302 | Register TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass |
| 11303 | : &ARM::GPRRegClass); |
| 11304 | MIB.addReg(TmpReg, RegState::Define|RegState::Dead); |
| 11305 | } |
| 11306 | } |
| 11307 | |
| 11308 | void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, |
| 11309 | SDNode *Node) const { |
| 11310 | if (MI.getOpcode() == ARM::MEMCPY) { |
| 11311 | attachMEMCPYScratchRegs(Subtarget, MI, Node); |
| 11312 | return; |
| 11313 | } |
| 11314 | |
| 11315 | const MCInstrDesc *MCID = &MI.getDesc(); |
// Adjust instructions that potentially set the 's' bit after isel, i.e. ADC,
// SBC, RSB, RSC. Coming out of isel, they have an implicit CPSR def, but the
// optional operand is still set to noreg. If needed, set the optional
// operand's register to CPSR, and remove the redundant implicit def.
| 11320 | // |
| 11321 | // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR). |
| 11322 | |
| 11323 | // Rename pseudo opcodes. |
| 11324 | unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode()); |
| 11325 | unsigned ccOutIdx; |
| 11326 | if (NewOpc) { |
| 11327 | const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo(); |
| 11328 | MCID = &TII->get(NewOpc); |
| 11329 | |
| 11330 | assert(MCID->getNumOperands() == |
| 11331 | MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize() |
| 11332 | && "converted opcode should be the same except for cc_out" |
| 11333 | " (and, on Thumb1, pred)" ); |
| 11334 | |
| 11335 | MI.setDesc(*MCID); |
| 11336 | |
| 11337 | // Add the optional cc_out operand |
| 11338 | MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true)); |
| 11339 | |
| 11340 | // On Thumb1, move all input operands to the end, then add the predicate |
| 11341 | if (Subtarget->isThumb1Only()) { |
| 11342 | for (unsigned c = MCID->getNumOperands() - 4; c--;) { |
| 11343 | MI.addOperand(MI.getOperand(1)); |
| 11344 | MI.RemoveOperand(1); |
| 11345 | } |
| 11346 | |
| 11347 | // Restore the ties |
| 11348 | for (unsigned i = MI.getNumOperands(); i--;) { |
| 11349 | const MachineOperand& op = MI.getOperand(i); |
| 11350 | if (op.isReg() && op.isUse()) { |
| 11351 | int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO); |
| 11352 | if (DefIdx != -1) |
| 11353 | MI.tieOperands(DefIdx, i); |
| 11354 | } |
| 11355 | } |
| 11356 | |
| 11357 | MI.addOperand(MachineOperand::CreateImm(ARMCC::AL)); |
| 11358 | MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false)); |
| 11359 | ccOutIdx = 1; |
| 11360 | } else |
| 11361 | ccOutIdx = MCID->getNumOperands() - 1; |
| 11362 | } else |
| 11363 | ccOutIdx = MCID->getNumOperands() - 1; |
| 11364 | |
| 11365 | // Any ARM instruction that sets the 's' bit should specify an optional |
| 11366 | // "cc_out" operand in the last operand position. |
| 11367 | if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { |
assert(!NewOpc && "Optional cc_out operand required");
| 11369 | return; |
| 11370 | } |
| 11371 | // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it |
| 11372 | // since we already have an optional CPSR def. |
| 11373 | bool definesCPSR = false; |
| 11374 | bool deadCPSR = false; |
| 11375 | for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e; |
| 11376 | ++i) { |
| 11377 | const MachineOperand &MO = MI.getOperand(i); |
| 11378 | if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { |
| 11379 | definesCPSR = true; |
| 11380 | if (MO.isDead()) |
| 11381 | deadCPSR = true; |
| 11382 | MI.RemoveOperand(i); |
| 11383 | break; |
| 11384 | } |
| 11385 | } |
| 11386 | if (!definesCPSR) { |
assert(!NewOpc && "Optional cc_out operand required");
| 11388 | return; |
| 11389 | } |
assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
| 11391 | if (deadCPSR) { |
| 11392 | assert(!MI.getOperand(ccOutIdx).getReg() && |
| 11393 | "expect uninitialized optional cc_out operand" ); |
| 11394 | // Thumb1 instructions must have the S bit even if the CPSR is dead. |
| 11395 | if (!Subtarget->isThumb1Only()) |
| 11396 | return; |
| 11397 | } |
| 11398 | |
| 11399 | // If this instruction was defined with an optional CPSR def and its dag node |
| 11400 | // had a live implicit CPSR def, then activate the optional CPSR def. |
| 11401 | MachineOperand &MO = MI.getOperand(ccOutIdx); |
| 11402 | MO.setReg(ARM::CPSR); |
| 11403 | MO.setIsDef(true); |
| 11404 | } |
| 11405 | |
| 11406 | //===----------------------------------------------------------------------===// |
| 11407 | // ARM Optimization Hooks |
| 11408 | //===----------------------------------------------------------------------===// |
| 11409 | |
| 11410 | // Helper function that checks if N is a null or all ones constant. |
| 11411 | static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { |
| 11412 | return AllOnes ? isAllOnesConstant(N) : isNullConstant(N); |
| 11413 | } |
| 11414 | |
| 11415 | // Return true if N is conditionally 0 or all ones. |
| 11416 | // Detects these expressions where cc is an i1 value: |
| 11417 | // |
| 11418 | // (select cc 0, y) [AllOnes=0] |
| 11419 | // (select cc y, 0) [AllOnes=0] |
| 11420 | // (zext cc) [AllOnes=0] |
| 11421 | // (sext cc) [AllOnes=0/1] |
| 11422 | // (select cc -1, y) [AllOnes=1] |
| 11423 | // (select cc y, -1) [AllOnes=1] |
| 11424 | // |
| 11425 | // Invert is set when N is the null/all ones constant when CC is false. |
| 11426 | // OtherOp is set to the alternative value of N. |
| 11427 | static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, |
| 11428 | SDValue &CC, bool &Invert, |
| 11429 | SDValue &OtherOp, |
| 11430 | SelectionDAG &DAG) { |
| 11431 | switch (N->getOpcode()) { |
| 11432 | default: return false; |
| 11433 | case ISD::SELECT: { |
| 11434 | CC = N->getOperand(0); |
| 11435 | SDValue N1 = N->getOperand(1); |
| 11436 | SDValue N2 = N->getOperand(2); |
| 11437 | if (isZeroOrAllOnes(N1, AllOnes)) { |
| 11438 | Invert = false; |
| 11439 | OtherOp = N2; |
| 11440 | return true; |
| 11441 | } |
| 11442 | if (isZeroOrAllOnes(N2, AllOnes)) { |
| 11443 | Invert = true; |
| 11444 | OtherOp = N1; |
| 11445 | return true; |
| 11446 | } |
| 11447 | return false; |
| 11448 | } |
| 11449 | case ISD::ZERO_EXTEND: |
| 11450 | // (zext cc) can never be the all ones value. |
| 11451 | if (AllOnes) |
| 11452 | return false; |
| 11453 | LLVM_FALLTHROUGH; |
| 11454 | case ISD::SIGN_EXTEND: { |
| 11455 | SDLoc dl(N); |
| 11456 | EVT VT = N->getValueType(0); |
| 11457 | CC = N->getOperand(0); |
| 11458 | if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC) |
| 11459 | return false; |
| 11460 | Invert = !AllOnes; |
| 11461 | if (AllOnes) |
| 11462 | // When looking for an AllOnes constant, N is an sext, and the 'other' |
| 11463 | // value is 0. |
| 11464 | OtherOp = DAG.getConstant(0, dl, VT); |
| 11465 | else if (N->getOpcode() == ISD::ZERO_EXTEND) |
| 11466 | // When looking for a 0 constant, N can be zext or sext. |
| 11467 | OtherOp = DAG.getConstant(1, dl, VT); |
| 11468 | else |
| 11469 | OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl, |
| 11470 | VT); |
| 11471 | return true; |
| 11472 | } |
| 11473 | } |
| 11474 | } |
| 11475 | |
| 11476 | // Combine a constant select operand into its use: |
| 11477 | // |
| 11478 | // (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) |
| 11479 | // (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) |
| 11480 | // (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1] |
| 11481 | // (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) |
| 11482 | // (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) |
| 11483 | // |
| 11484 | // The transform is rejected if the select doesn't have a constant operand that |
| 11485 | // is null, or all ones when AllOnes is set. |
| 11486 | // |
| 11487 | // Also recognize sext/zext from i1: |
| 11488 | // |
| 11489 | // (add (zext cc), x) -> (select cc (add x, 1), x) |
| 11490 | // (add (sext cc), x) -> (select cc (add x, -1), x) |
| 11491 | // |
| 11492 | // These transformations eventually create predicated instructions. |
| 11493 | // |
| 11494 | // @param N The node to transform. |
| 11495 | // @param Slct The N operand that is a select. |
| 11496 | // @param OtherOp The other N operand (x above). |
| 11497 | // @param DCI Context. |
| 11498 | // @param AllOnes Require the select constant to be all ones instead of null. |
| 11499 | // @returns The new node, or SDValue() on failure. |
| 11500 | static |
| 11501 | SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, |
| 11502 | TargetLowering::DAGCombinerInfo &DCI, |
| 11503 | bool AllOnes = false) { |
| 11504 | SelectionDAG &DAG = DCI.DAG; |
| 11505 | EVT VT = N->getValueType(0); |
| 11506 | SDValue NonConstantVal; |
| 11507 | SDValue CCOp; |
| 11508 | bool SwapSelectOps; |
| 11509 | if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps, |
| 11510 | NonConstantVal, DAG)) |
| 11511 | return SDValue(); |
| 11512 | |
// Slct is now known to be the desired identity constant when CC is true.
| 11514 | SDValue TrueVal = OtherOp; |
| 11515 | SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, |
| 11516 | OtherOp, NonConstantVal); |
| 11517 | // Unless SwapSelectOps says CC should be false. |
| 11518 | if (SwapSelectOps) |
| 11519 | std::swap(TrueVal, FalseVal); |
| 11520 | |
| 11521 | return DAG.getNode(ISD::SELECT, SDLoc(N), VT, |
| 11522 | CCOp, TrueVal, FalseVal); |
| 11523 | } |
| 11524 | |
| 11525 | // Attempt combineSelectAndUse on each operand of a commutative operator N. |
| 11526 | static |
| 11527 | SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, |
| 11528 | TargetLowering::DAGCombinerInfo &DCI) { |
| 11529 | SDValue N0 = N->getOperand(0); |
| 11530 | SDValue N1 = N->getOperand(1); |
| 11531 | if (N0.getNode()->hasOneUse()) |
| 11532 | if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes)) |
| 11533 | return Result; |
| 11534 | if (N1.getNode()->hasOneUse()) |
| 11535 | if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes)) |
| 11536 | return Result; |
| 11537 | return SDValue(); |
| 11538 | } |
| 11539 | |
| 11540 | static bool IsVUZPShuffleNode(SDNode *N) { |
| 11541 | // VUZP shuffle node. |
| 11542 | if (N->getOpcode() == ARMISD::VUZP) |
| 11543 | return true; |
| 11544 | |
| 11545 | // "VUZP" on i32 is an alias for VTRN. |
| 11546 | if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32) |
| 11547 | return true; |
| 11548 | |
| 11549 | return false; |
| 11550 | } |
| 11551 | |
| 11552 | static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, |
| 11553 | TargetLowering::DAGCombinerInfo &DCI, |
| 11554 | const ARMSubtarget *Subtarget) { |
| 11555 | // Look for ADD(VUZP.0, VUZP.1). |
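// Adding the two halves of a VUZP sums each pair of adjacent lanes of the
// original operands, e.g. ADD(VUZP(a, b).0, VUZP(a, b).1) == VPADD(a, b).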
| 11556 | if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() || |
| 11557 | N0 == N1) |
| 11558 | return SDValue(); |
| 11559 | |
| 11560 | // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD. |
| 11561 | if (!N->getValueType(0).is64BitVector()) |
| 11562 | return SDValue(); |
| 11563 | |
| 11564 | // Generate vpadd. |
| 11565 | SelectionDAG &DAG = DCI.DAG; |
| 11566 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 11567 | SDLoc dl(N); |
| 11568 | SDNode *Unzip = N0.getNode(); |
| 11569 | EVT VT = N->getValueType(0); |
| 11570 | |
| 11571 | SmallVector<SDValue, 8> Ops; |
| 11572 | Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl, |
| 11573 | TLI.getPointerTy(DAG.getDataLayout()))); |
| 11574 | Ops.push_back(Unzip->getOperand(0)); |
| 11575 | Ops.push_back(Unzip->getOperand(1)); |
| 11576 | |
| 11577 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops); |
| 11578 | } |
| 11579 | |
| 11580 | static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
| 11581 | TargetLowering::DAGCombinerInfo &DCI, |
| 11582 | const ARMSubtarget *Subtarget) { |
| 11583 | // Check for two extended operands. |
| 11584 | if (!(N0.getOpcode() == ISD::SIGN_EXTEND && |
| 11585 | N1.getOpcode() == ISD::SIGN_EXTEND) && |
| 11586 | !(N0.getOpcode() == ISD::ZERO_EXTEND && |
| 11587 | N1.getOpcode() == ISD::ZERO_EXTEND)) |
| 11588 | return SDValue(); |
| 11589 | |
| 11590 | SDValue N00 = N0.getOperand(0); |
| 11591 | SDValue N10 = N1.getOperand(0); |
| 11592 | |
| 11593 | // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1)) |
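// For example, ADD(SEXT(VUZP(a, b).0), SEXT(VUZP(a, b).1)) is the widening
// pairwise add of the concatenation of a and b, i.e. VPADDL(CONCAT(a, b)).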
| 11594 | if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() || |
| 11595 | N00 == N10) |
| 11596 | return SDValue(); |
| 11597 | |
| 11598 | // We only recognize Q register paddl here; this can't be reached until |
| 11599 | // after type legalization. |
| 11600 | if (!N00.getValueType().is64BitVector() || |
| 11601 | !N0.getValueType().is128BitVector()) |
| 11602 | return SDValue(); |
| 11603 | |
| 11604 | // Generate vpaddl. |
| 11605 | SelectionDAG &DAG = DCI.DAG; |
| 11606 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 11607 | SDLoc dl(N); |
| 11608 | EVT VT = N->getValueType(0); |
| 11609 | |
| 11610 | SmallVector<SDValue, 8> Ops; |
| 11611 | // Form vpaddl.sN or vpaddl.uN depending on the kind of extension. |
| 11612 | unsigned Opcode; |
| 11613 | if (N0.getOpcode() == ISD::SIGN_EXTEND) |
| 11614 | Opcode = Intrinsic::arm_neon_vpaddls; |
| 11615 | else |
| 11616 | Opcode = Intrinsic::arm_neon_vpaddlu; |
| 11617 | Ops.push_back(DAG.getConstant(Opcode, dl, |
| 11618 | TLI.getPointerTy(DAG.getDataLayout()))); |
| 11619 | EVT ElemTy = N00.getValueType().getVectorElementType(); |
| 11620 | unsigned NumElts = VT.getVectorNumElements(); |
| 11621 | EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2); |
| 11622 | SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT, |
| 11623 | N00.getOperand(0), N00.getOperand(1)); |
| 11624 | Ops.push_back(Concat); |
| 11625 | |
| 11626 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops); |
| 11627 | } |
| 11628 | |
| 11629 | // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in |
| 11630 | // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is |
| 11631 | // much easier to match. |
| 11632 | static SDValue |
| 11633 | AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, |
| 11634 | TargetLowering::DAGCombinerInfo &DCI, |
| 11635 | const ARMSubtarget *Subtarget) { |
// Only perform the optimization after legalize, and if NEON is available. We
// also expect both operands to be BUILD_VECTORs.
| 11638 | if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() |
| 11639 | || N0.getOpcode() != ISD::BUILD_VECTOR |
| 11640 | || N1.getOpcode() != ISD::BUILD_VECTOR) |
| 11641 | return SDValue(); |
| 11642 | |
| 11643 | // Check output type since VPADDL operand elements can only be 8, 16, or 32. |
| 11644 | EVT VT = N->getValueType(0); |
| 11645 | if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) |
| 11646 | return SDValue(); |
| 11647 | |
// Check that the vector operands are of the right form.
// N0 and N1 are BUILD_VECTOR nodes with N EXTRACT_VECTOR operands, where N
// is the size of the formed vector.
// Each EXTRACT_VECTOR should have the same input vector and an odd or even
// index such that we have a pairwise add pattern.
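// For example, with a v8i16 input vector V the expected form is:
//   N0 = BUILD_VECTOR(V[0], V[2], V[4], V[6])
//   N1 = BUILD_VECTOR(V[1], V[3], V[5], V[7])
// so that ADD(N0, N1) is a pairwise add over V.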
| 11653 | |
| 11654 | // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. |
| 11655 | if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
| 11656 | return SDValue(); |
| 11657 | SDValue Vec = N0->getOperand(0)->getOperand(0); |
| 11658 | SDNode *V = Vec.getNode(); |
| 11659 | unsigned nextIndex = 0; |
| 11660 | |
// For each operand of the ADD (both of which are BUILD_VECTORs),
// check to see if each of their operands is an EXTRACT_VECTOR with
// the same vector and the appropriate index.
| 11664 | for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { |
| 11665 | if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT |
| 11666 | && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 11667 | |
| 11668 | SDValue ExtVec0 = N0->getOperand(i); |
| 11669 | SDValue ExtVec1 = N1->getOperand(i); |
| 11670 | |
// First operand is the vector; verify it's the same.
| 11672 | if (V != ExtVec0->getOperand(0).getNode() || |
| 11673 | V != ExtVec1->getOperand(0).getNode()) |
| 11674 | return SDValue(); |
| 11675 | |
// Second is the constant index; verify it's correct.
| 11677 | ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1)); |
| 11678 | ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1)); |
| 11679 | |
// The constant indices must step through the even values in N0 and the
// odd values in N1, in order.
| 11681 | if (!C0 || !C1 || C0->getZExtValue() != nextIndex |
| 11682 | || C1->getZExtValue() != nextIndex+1) |
| 11683 | return SDValue(); |
| 11684 | |
| 11685 | // Increment index. |
| 11686 | nextIndex+=2; |
| 11687 | } else |
| 11688 | return SDValue(); |
| 11689 | } |
| 11690 | |
| 11691 | // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure |
| 11692 | // we're using the entire input vector, otherwise there's a size/legality |
| 11693 | // mismatch somewhere. |
| 11694 | if (nextIndex != Vec.getValueType().getVectorNumElements() || |
| 11695 | Vec.getValueType().getVectorElementType() == VT.getVectorElementType()) |
| 11696 | return SDValue(); |
| 11697 | |
| 11698 | // Create VPADDL node. |
| 11699 | SelectionDAG &DAG = DCI.DAG; |
| 11700 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 11701 | |
| 11702 | SDLoc dl(N); |
| 11703 | |
| 11704 | // Build operand list. |
| 11705 | SmallVector<SDValue, 8> Ops; |
| 11706 | Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl, |
| 11707 | TLI.getPointerTy(DAG.getDataLayout()))); |
| 11708 | |
| 11709 | // Input is the vector. |
| 11710 | Ops.push_back(Vec); |
| 11711 | |
| 11712 | // Get widened type and narrowed type. |
| 11713 | MVT widenType; |
| 11714 | unsigned numElem = VT.getVectorNumElements(); |
| 11715 | |
| 11716 | EVT inputLaneType = Vec.getValueType().getVectorElementType(); |
| 11717 | switch (inputLaneType.getSimpleVT().SimpleTy) { |
| 11718 | case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break; |
| 11719 | case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break; |
| 11720 | case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break; |
| 11721 | default: |
| 11722 | llvm_unreachable("Invalid vector element type for padd optimization." ); |
| 11723 | } |
| 11724 | |
| 11725 | SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops); |
| 11726 | unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE; |
| 11727 | return DAG.getNode(ExtOp, dl, VT, tmp); |
| 11728 | } |
| 11729 | |
| 11730 | static SDValue findMUL_LOHI(SDValue V) { |
| 11731 | if (V->getOpcode() == ISD::UMUL_LOHI || |
| 11732 | V->getOpcode() == ISD::SMUL_LOHI) |
| 11733 | return V; |
| 11734 | return SDValue(); |
| 11735 | } |
| 11736 | |
| 11737 | static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode, |
| 11738 | TargetLowering::DAGCombinerInfo &DCI, |
| 11739 | const ARMSubtarget *Subtarget) { |
| 11740 | if (!Subtarget->hasBaseDSP()) |
| 11741 | return SDValue(); |
| 11742 | |
// SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
// accumulate the product into a 64-bit value. The 16-bit values will
// be sign extended somehow or SRA'd into 32-bit values. The pattern is
// (addc (mul 16bit, 16bit), lo) for the low half and
// (adde (sra (mul 16bit, 16bit), 31), hi) for the high half.
| 11747 | SDValue Mul = AddcNode->getOperand(0); |
| 11748 | SDValue Lo = AddcNode->getOperand(1); |
| 11749 | if (Mul.getOpcode() != ISD::MUL) { |
| 11750 | Lo = AddcNode->getOperand(0); |
| 11751 | Mul = AddcNode->getOperand(1); |
| 11752 | if (Mul.getOpcode() != ISD::MUL) |
| 11753 | return SDValue(); |
| 11754 | } |
| 11755 | |
| 11756 | SDValue SRA = AddeNode->getOperand(0); |
| 11757 | SDValue Hi = AddeNode->getOperand(1); |
| 11758 | if (SRA.getOpcode() != ISD::SRA) { |
| 11759 | SRA = AddeNode->getOperand(1); |
| 11760 | Hi = AddeNode->getOperand(0); |
| 11761 | if (SRA.getOpcode() != ISD::SRA) |
| 11762 | return SDValue(); |
| 11763 | } |
| 11764 | if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) { |
| 11765 | if (Const->getZExtValue() != 31) |
| 11766 | return SDValue(); |
| 11767 | } else |
| 11768 | return SDValue(); |
| 11769 | |
| 11770 | if (SRA.getOperand(0) != Mul) |
| 11771 | return SDValue(); |
| 11772 | |
| 11773 | SelectionDAG &DAG = DCI.DAG; |
| 11774 | SDLoc dl(AddcNode); |
| 11775 | unsigned Opcode = 0; |
| 11776 | SDValue Op0; |
| 11777 | SDValue Op1; |
| 11778 | |
| 11779 | if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) { |
| 11780 | Opcode = ARMISD::SMLALBB; |
| 11781 | Op0 = Mul.getOperand(0); |
| 11782 | Op1 = Mul.getOperand(1); |
| 11783 | } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) { |
| 11784 | Opcode = ARMISD::SMLALBT; |
| 11785 | Op0 = Mul.getOperand(0); |
| 11786 | Op1 = Mul.getOperand(1).getOperand(0); |
| 11787 | } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) { |
| 11788 | Opcode = ARMISD::SMLALTB; |
| 11789 | Op0 = Mul.getOperand(0).getOperand(0); |
| 11790 | Op1 = Mul.getOperand(1); |
| 11791 | } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) { |
| 11792 | Opcode = ARMISD::SMLALTT; |
| 11793 | Op0 = Mul->getOperand(0).getOperand(0); |
| 11794 | Op1 = Mul->getOperand(1).getOperand(0); |
| 11795 | } |
| 11796 | |
| 11797 | if (!Op0 || !Op1) |
| 11798 | return SDValue(); |
| 11799 | |
| 11800 | SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32), |
| 11801 | Op0, Op1, Lo, Hi); |
// Replace the ADD nodes' uses with the SMLAL node's values.
| 11803 | SDValue HiMLALResult(SMLAL.getNode(), 1); |
| 11804 | SDValue LoMLALResult(SMLAL.getNode(), 0); |
| 11805 | |
| 11806 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult); |
| 11807 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult); |
| 11808 | |
| 11809 | // Return original node to notify the driver to stop replacing. |
| 11810 | SDValue resNode(AddcNode, 0); |
| 11811 | return resNode; |
| 11812 | } |
| 11813 | |
| 11814 | static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode, |
| 11815 | TargetLowering::DAGCombinerInfo &DCI, |
| 11816 | const ARMSubtarget *Subtarget) { |
| 11817 | // Look for multiply add opportunities. |
// The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
// each add node consumes a value from ISD::UMUL_LOHI and there is
| 11820 | // a glue link from the first add to the second add. |
| 11821 | // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by |
| 11822 | // a S/UMLAL instruction. |
//                  UMUL_LOHI
//                 / :lo    \ :hi
//                V          \          [no multiline comment]
//        loAdd ->  ADDC      |
//                   \ :carry /
//                    V      V
//                      ADDE <- hiAdd
| 11830 | // |
| 11831 | // In the special case where only the higher part of a signed result is used |
| 11832 | // and the add to the low part of the result of ISD::UMUL_LOHI adds or subtracts |
| 11833 | // a constant with the exact value of 0x80000000, we recognize we are dealing |
| 11834 | // with a "rounded multiply and add" (or subtract) and transform it into |
// either an ARMISD::SMMLAR or ARMISD::SMMLSR, respectively.
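//
// For example, SMMLAR computes ((acc << 32) + a * b + 0x80000000) >> 32,
// i.e. the rounded high word of the 64-bit product plus the accumulator.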
| 11836 | |
| 11837 | assert((AddeSubeNode->getOpcode() == ARMISD::ADDE || |
| 11838 | AddeSubeNode->getOpcode() == ARMISD::SUBE) && |
| 11839 | "Expect an ADDE or SUBE" ); |
| 11840 | |
| 11841 | assert(AddeSubeNode->getNumOperands() == 3 && |
| 11842 | AddeSubeNode->getOperand(2).getValueType() == MVT::i32 && |
| 11843 | "ADDE node has the wrong inputs" ); |
| 11844 | |
| 11845 | // Check that we are chained to the right ADDC or SUBC node. |
| 11846 | SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode(); |
| 11847 | if ((AddeSubeNode->getOpcode() == ARMISD::ADDE && |
| 11848 | AddcSubcNode->getOpcode() != ARMISD::ADDC) || |
| 11849 | (AddeSubeNode->getOpcode() == ARMISD::SUBE && |
| 11850 | AddcSubcNode->getOpcode() != ARMISD::SUBC)) |
| 11851 | return SDValue(); |
| 11852 | |
| 11853 | SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0); |
| 11854 | SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1); |
| 11855 | |
| 11856 | // Check if the two operands are from the same mul_lohi node. |
| 11857 | if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode()) |
| 11858 | return SDValue(); |
| 11859 | |
| 11860 | assert(AddcSubcNode->getNumValues() == 2 && |
| 11861 | AddcSubcNode->getValueType(0) == MVT::i32 && |
| 11862 | "Expect ADDC with two result values. First: i32" ); |
| 11863 | |
// Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
// may be an SMLAL which multiplies two 16-bit values.
| 11866 | if (AddeSubeNode->getOpcode() == ARMISD::ADDE && |
| 11867 | AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI && |
| 11868 | AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI && |
| 11869 | AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI && |
| 11870 | AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI) |
| 11871 | return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget); |
| 11872 | |
| 11873 | // Check for the triangle shape. |
| 11874 | SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0); |
| 11875 | SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1); |
| 11876 | |
| 11877 | // Make sure that the ADDE/SUBE operands are not coming from the same node. |
| 11878 | if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode()) |
| 11879 | return SDValue(); |
| 11880 | |
| 11881 | // Find the MUL_LOHI node walking up ADDE/SUBE's operands. |
| 11882 | bool IsLeftOperandMUL = false; |
| 11883 | SDValue MULOp = findMUL_LOHI(AddeSubeOp0); |
| 11884 | if (MULOp == SDValue()) |
| 11885 | MULOp = findMUL_LOHI(AddeSubeOp1); |
| 11886 | else |
| 11887 | IsLeftOperandMUL = true; |
| 11888 | if (MULOp == SDValue()) |
| 11889 | return SDValue(); |
| 11890 | |
| 11891 | // Figure out the right opcode. |
| 11892 | unsigned Opc = MULOp->getOpcode(); |
| 11893 | unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; |
| 11894 | |
| 11895 | // Figure out the high and low input values to the MLAL node. |
| 11896 | SDValue *HiAddSub = nullptr; |
| 11897 | SDValue *LoMul = nullptr; |
| 11898 | SDValue *LowAddSub = nullptr; |
| 11899 | |
| 11900 | // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI. |
| 11901 | if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1))) |
| 11902 | return SDValue(); |
| 11903 | |
| 11904 | if (IsLeftOperandMUL) |
| 11905 | HiAddSub = &AddeSubeOp1; |
| 11906 | else |
| 11907 | HiAddSub = &AddeSubeOp0; |
| 11908 | |
// Ensure that LoMul and LowAddSub are taken from the correct ISD::SMUL_LOHI
// node whose low result is fed to the ADDC/SUBC we are checking.
| 11911 | |
| 11912 | if (AddcSubcOp0 == MULOp.getValue(0)) { |
| 11913 | LoMul = &AddcSubcOp0; |
| 11914 | LowAddSub = &AddcSubcOp1; |
| 11915 | } |
| 11916 | if (AddcSubcOp1 == MULOp.getValue(0)) { |
| 11917 | LoMul = &AddcSubcOp1; |
| 11918 | LowAddSub = &AddcSubcOp0; |
| 11919 | } |
| 11920 | |
| 11921 | if (!LoMul) |
| 11922 | return SDValue(); |
| 11923 | |
| 11924 | // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC |
| 11925 | // the replacement below will create a cycle. |
| 11926 | if (AddcSubcNode == HiAddSub->getNode() || |
| 11927 | AddcSubcNode->isPredecessorOf(HiAddSub->getNode())) |
| 11928 | return SDValue(); |
| 11929 | |
| 11930 | // Create the merged node. |
| 11931 | SelectionDAG &DAG = DCI.DAG; |
| 11932 | |
| 11933 | // Start building operand list. |
| 11934 | SmallVector<SDValue, 8> Ops; |
| 11935 | Ops.push_back(LoMul->getOperand(0)); |
| 11936 | Ops.push_back(LoMul->getOperand(1)); |
| 11937 | |
// Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to be
// the case, we must be doing signed multiplication and only use the higher
// part of the result of the MLAL; furthermore, the LowAddSub must be a
// constant addition or subtraction with the value 0x80000000.
| 11942 | if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() && |
| 11943 | FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) && |
| 11944 | LowAddSub->getNode()->getOpcode() == ISD::Constant && |
| 11945 | static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() == |
| 11946 | 0x80000000) { |
| 11947 | Ops.push_back(*HiAddSub); |
| 11948 | if (AddcSubcNode->getOpcode() == ARMISD::SUBC) { |
| 11949 | FinalOpc = ARMISD::SMMLSR; |
| 11950 | } else { |
| 11951 | FinalOpc = ARMISD::SMMLAR; |
| 11952 | } |
| 11953 | SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops); |
| 11954 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode); |
| 11955 | |
| 11956 | return SDValue(AddeSubeNode, 0); |
| 11957 | } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC) |
// SMMLS is generated during instruction selection and the rest of this
// function cannot handle the case where AddcSubcNode is a SUBC.
| 11960 | return SDValue(); |
| 11961 | |
| 11962 | // Finish building the operand list for {U/S}MLAL |
| 11963 | Ops.push_back(*LowAddSub); |
| 11964 | Ops.push_back(*HiAddSub); |
| 11965 | |
| 11966 | SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), |
| 11967 | DAG.getVTList(MVT::i32, MVT::i32), Ops); |
| 11968 | |
// Replace the ADD nodes' uses with the MLAL node's values.
| 11970 | SDValue HiMLALResult(MLALNode.getNode(), 1); |
| 11971 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult); |
| 11972 | |
| 11973 | SDValue LoMLALResult(MLALNode.getNode(), 0); |
| 11974 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult); |
| 11975 | |
| 11976 | // Return original node to notify the driver to stop replacing. |
| 11977 | return SDValue(AddeSubeNode, 0); |
| 11978 | } |
| 11979 | |
| 11980 | static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode, |
| 11981 | TargetLowering::DAGCombinerInfo &DCI, |
| 11982 | const ARMSubtarget *Subtarget) { |
| 11983 | // UMAAL is similar to UMLAL except that it adds two unsigned values. |
| 11984 | // While trying to combine for the other MLAL nodes, first search for the |
| 11985 | // chance to use UMAAL. Check if Addc uses a node which has already |
| 11986 | // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde |
| 11987 | // as the addend, and it's handled in PerformUMLALCombine. |
| 11988 | |
| 11989 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
| 11990 | return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget); |
| 11991 | |
| 11992 | // Check that we have a glued ADDC node. |
| 11993 | SDNode* AddcNode = AddeNode->getOperand(2).getNode(); |
| 11994 | if (AddcNode->getOpcode() != ARMISD::ADDC) |
| 11995 | return SDValue(); |
| 11996 | |
| 11997 | // Find the converted UMAAL or quit if it doesn't exist. |
| 11998 | SDNode *UmlalNode = nullptr; |
| 11999 | SDValue AddHi; |
| 12000 | if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) { |
| 12001 | UmlalNode = AddcNode->getOperand(0).getNode(); |
| 12002 | AddHi = AddcNode->getOperand(1); |
| 12003 | } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) { |
| 12004 | UmlalNode = AddcNode->getOperand(1).getNode(); |
| 12005 | AddHi = AddcNode->getOperand(0); |
| 12006 | } else { |
| 12007 | return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget); |
| 12008 | } |
| 12009 | |
| 12010 | // The ADDC should be glued to an ADDE node, which uses the same UMLAL as |
| 12011 | // the ADDC as well as Zero. |
| 12012 | if (!isNullConstant(UmlalNode->getOperand(3))) |
| 12013 | return SDValue(); |
| 12014 | |
| 12015 | if ((isNullConstant(AddeNode->getOperand(0)) && |
| 12016 | AddeNode->getOperand(1).getNode() == UmlalNode) || |
| 12017 | (AddeNode->getOperand(0).getNode() == UmlalNode && |
| 12018 | isNullConstant(AddeNode->getOperand(1)))) { |
| 12019 | SelectionDAG &DAG = DCI.DAG; |
| 12020 | SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1), |
| 12021 | UmlalNode->getOperand(2), AddHi }; |
| 12022 | SDValue UMAAL = DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode), |
| 12023 | DAG.getVTList(MVT::i32, MVT::i32), Ops); |
| 12024 | |
// Replace the ADD nodes' uses with the UMAAL node's values.
| 12026 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1)); |
| 12027 | DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0)); |
| 12028 | |
| 12029 | // Return original node to notify the driver to stop replacing. |
| 12030 | return SDValue(AddeNode, 0); |
| 12031 | } |
| 12032 | return SDValue(); |
| 12033 | } |
| 12034 | |
| 12035 | static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG, |
| 12036 | const ARMSubtarget *Subtarget) { |
| 12037 | if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP()) |
| 12038 | return SDValue(); |
| 12039 | |
| 12040 | // Check that we have a pair of ADDC and ADDE as operands. |
| 12041 | // Both addends of the ADDE must be zero. |
| 12042 | SDNode* AddcNode = N->getOperand(2).getNode(); |
| 12043 | SDNode* AddeNode = N->getOperand(3).getNode(); |
| 12044 | if ((AddcNode->getOpcode() == ARMISD::ADDC) && |
| 12045 | (AddeNode->getOpcode() == ARMISD::ADDE) && |
| 12046 | isNullConstant(AddeNode->getOperand(0)) && |
| 12047 | isNullConstant(AddeNode->getOperand(1)) && |
| 12048 | (AddeNode->getOperand(2).getNode() == AddcNode)) |
| 12049 | return DAG.getNode(ARMISD::UMAAL, SDLoc(N), |
| 12050 | DAG.getVTList(MVT::i32, MVT::i32), |
| 12051 | {N->getOperand(0), N->getOperand(1), |
| 12052 | AddcNode->getOperand(0), AddcNode->getOperand(1)}); |
| 12053 | else |
| 12054 | return SDValue(); |
| 12055 | } |
| 12056 | |
| 12057 | static SDValue PerformAddcSubcCombine(SDNode *N, |
| 12058 | TargetLowering::DAGCombinerInfo &DCI, |
| 12059 | const ARMSubtarget *Subtarget) { |
| 12060 | SelectionDAG &DAG(DCI.DAG); |
| 12061 | |
| 12062 | if (N->getOpcode() == ARMISD::SUBC) { |
| 12063 | // (SUBC (ADDE 0, 0, C), 1) -> C |
| 12064 | SDValue LHS = N->getOperand(0); |
| 12065 | SDValue RHS = N->getOperand(1); |
| 12066 | if (LHS->getOpcode() == ARMISD::ADDE && |
| 12067 | isNullConstant(LHS->getOperand(0)) && |
| 12068 | isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) { |
| 12069 | return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2)); |
| 12070 | } |
| 12071 | } |
| 12072 | |
| 12073 | if (Subtarget->isThumb1Only()) { |
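// Fold a negative immediate into the opposite carry-setting operation,
// e.g. ADDC(x, -4) -> SUBC(x, 4), giving an immediate Thumb1 can encode.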
| 12074 | SDValue RHS = N->getOperand(1); |
| 12075 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { |
| 12076 | int32_t imm = C->getSExtValue(); |
| 12077 | if (imm < 0 && imm > std::numeric_limits<int>::min()) { |
| 12078 | SDLoc DL(N); |
| 12079 | RHS = DAG.getConstant(-imm, DL, MVT::i32); |
| 12080 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC |
| 12081 | : ARMISD::ADDC; |
| 12082 | return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS); |
| 12083 | } |
| 12084 | } |
| 12085 | } |
| 12086 | |
| 12087 | return SDValue(); |
| 12088 | } |
| 12089 | |
| 12090 | static SDValue PerformAddeSubeCombine(SDNode *N, |
| 12091 | TargetLowering::DAGCombinerInfo &DCI, |
| 12092 | const ARMSubtarget *Subtarget) { |
| 12093 | if (Subtarget->isThumb1Only()) { |
| 12094 | SelectionDAG &DAG = DCI.DAG; |
| 12095 | SDValue RHS = N->getOperand(1); |
| 12096 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) { |
| 12097 | int64_t imm = C->getSExtValue(); |
| 12098 | if (imm < 0) { |
| 12099 | SDLoc DL(N); |
| 12100 | |
| 12101 | // The with-carry-in form matches bitwise not instead of the negation. |
| 12102 | // Effectively, the inverse interpretation of the carry flag already |
| 12103 | // accounts for part of the negation. |
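// For example, ADDE(x, -5, carry) == SUBE(x, 4, carry), since -5 == ~4.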
| 12104 | RHS = DAG.getConstant(~imm, DL, MVT::i32); |
| 12105 | |
| 12106 | unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE |
| 12107 | : ARMISD::ADDE; |
| 12108 | return DAG.getNode(Opcode, DL, N->getVTList(), |
| 12109 | N->getOperand(0), RHS, N->getOperand(2)); |
| 12110 | } |
| 12111 | } |
| 12112 | } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) { |
| 12113 | return AddCombineTo64bitMLAL(N, DCI, Subtarget); |
| 12114 | } |
| 12115 | return SDValue(); |
| 12116 | } |
| 12117 | |
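// Detect a scalar min/max idiom that selects between a value and an MVE
// vector reduction of the same flavour, e.g.
//   select(setcc(x, vecreduce_umin(v), ult), x, vecreduce_umin(v))
// and lower it to the accumulating across-vector form, here VMINV(x, v).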
| 12118 | static SDValue PerformSELECTCombine(SDNode *N, |
| 12119 | TargetLowering::DAGCombinerInfo &DCI, |
| 12120 | const ARMSubtarget *Subtarget) { |
| 12121 | if (!Subtarget->hasMVEIntegerOps()) |
| 12122 | return SDValue(); |
| 12123 | |
| 12124 | SDLoc dl(N); |
| 12125 | SDValue SetCC; |
| 12126 | SDValue LHS; |
| 12127 | SDValue RHS; |
| 12128 | ISD::CondCode CC; |
| 12129 | SDValue TrueVal; |
| 12130 | SDValue FalseVal; |
| 12131 | |
| 12132 | if (N->getOpcode() == ISD::SELECT && |
| 12133 | N->getOperand(0)->getOpcode() == ISD::SETCC) { |
| 12134 | SetCC = N->getOperand(0); |
| 12135 | LHS = SetCC->getOperand(0); |
| 12136 | RHS = SetCC->getOperand(1); |
| 12137 | CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get(); |
| 12138 | TrueVal = N->getOperand(1); |
| 12139 | FalseVal = N->getOperand(2); |
| 12140 | } else if (N->getOpcode() == ISD::SELECT_CC) { |
| 12141 | LHS = N->getOperand(0); |
| 12142 | RHS = N->getOperand(1); |
| 12143 | CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); |
| 12144 | TrueVal = N->getOperand(2); |
| 12145 | FalseVal = N->getOperand(3); |
| 12146 | } else { |
| 12147 | return SDValue(); |
| 12148 | } |
| 12149 | |
| 12150 | unsigned int Opcode = 0; |
| 12151 | if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMIN || |
| 12152 | FalseVal->getOpcode() == ISD::VECREDUCE_UMIN) && |
| 12153 | (CC == ISD::SETULT || CC == ISD::SETUGT)) { |
| 12154 | Opcode = ARMISD::VMINVu; |
| 12155 | if (CC == ISD::SETUGT) |
| 12156 | std::swap(TrueVal, FalseVal); |
| 12157 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMIN || |
| 12158 | FalseVal->getOpcode() == ISD::VECREDUCE_SMIN) && |
| 12159 | (CC == ISD::SETLT || CC == ISD::SETGT)) { |
| 12160 | Opcode = ARMISD::VMINVs; |
| 12161 | if (CC == ISD::SETGT) |
| 12162 | std::swap(TrueVal, FalseVal); |
| 12163 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMAX || |
| 12164 | FalseVal->getOpcode() == ISD::VECREDUCE_UMAX) && |
| 12165 | (CC == ISD::SETUGT || CC == ISD::SETULT)) { |
| 12166 | Opcode = ARMISD::VMAXVu; |
| 12167 | if (CC == ISD::SETULT) |
| 12168 | std::swap(TrueVal, FalseVal); |
| 12169 | } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMAX || |
| 12170 | FalseVal->getOpcode() == ISD::VECREDUCE_SMAX) && |
| 12171 | (CC == ISD::SETGT || CC == ISD::SETLT)) { |
| 12172 | Opcode = ARMISD::VMAXVs; |
| 12173 | if (CC == ISD::SETLT) |
| 12174 | std::swap(TrueVal, FalseVal); |
| 12175 | } else |
| 12176 | return SDValue(); |
| 12177 | |
| 12178 | // Normalise to the right hand side being the vector reduction |
| 12179 | switch (TrueVal->getOpcode()) { |
| 12180 | case ISD::VECREDUCE_UMIN: |
| 12181 | case ISD::VECREDUCE_SMIN: |
| 12182 | case ISD::VECREDUCE_UMAX: |
| 12183 | case ISD::VECREDUCE_SMAX: |
| 12184 | std::swap(LHS, RHS); |
| 12185 | std::swap(TrueVal, FalseVal); |
| 12186 | break; |
| 12187 | } |
| 12188 | |
| 12189 | EVT VectorType = FalseVal->getOperand(0).getValueType(); |
| 12190 | |
| 12191 | if (VectorType != MVT::v16i8 && VectorType != MVT::v8i16 && |
| 12192 | VectorType != MVT::v4i32) |
| 12193 | return SDValue(); |
| 12194 | |
| 12195 | EVT VectorScalarType = VectorType.getVectorElementType(); |
| 12196 | |
| 12197 | // The values being selected must also be the ones being compared |
| 12198 | if (TrueVal != LHS || FalseVal != RHS) |
| 12199 | return SDValue(); |
| 12200 | |
| 12201 | EVT LeftType = LHS->getValueType(0); |
| 12202 | EVT RightType = RHS->getValueType(0); |
| 12203 | |
| 12204 | // The types must match the reduced type too |
| 12205 | if (LeftType != VectorScalarType || RightType != VectorScalarType) |
| 12206 | return SDValue(); |
| 12207 | |
| 12208 | // Legalise the scalar to an i32 |
| 12209 | if (VectorScalarType != MVT::i32) |
| 12210 | LHS = DCI.DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS); |
| 12211 | |
| 12212 | // Generate the reduction as an i32 for legalisation purposes |
| 12213 | auto Reduction = |
| 12214 | DCI.DAG.getNode(Opcode, dl, MVT::i32, LHS, RHS->getOperand(0)); |
| 12215 | |
| 12216 | // The result isn't actually an i32 so truncate it back to its original type |
| 12217 | if (VectorScalarType != MVT::i32) |
| 12218 | Reduction = DCI.DAG.getNode(ISD::TRUNCATE, dl, VectorScalarType, Reduction); |
| 12219 | |
| 12220 | return Reduction; |
| 12221 | } |
| 12222 | |
// A special combine for the vqdmulh family of instructions. This is one of the
// potential set of patterns that could match this instruction. The base pattern
// you would expect is min(max(ashr(mul(mul(sext(x), 2), sext(y)), 16))).
// This matches the slightly different
// min(max(ashr(mul(mul(sext(x), sext(y)), 2), 16))), which llvm will have
// optimized to min(ashr(mul(sext(x), sext(y)), 15)) as the max is unnecessary.
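//
// For example, with i16 elements the matched (post-optimization) pattern is:
//   %m = mul <8 x i32> (sext %a), (sext %b)
//   %s = ashr <8 x i32> %m, <15, 15, ...>
//   %r = smin <8 x i32> %s, <32767, 32767, ...>
// which becomes a v8i16 VQDMULH of %a and %b, sign extended back to the
// wider type.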
| 12229 | static SDValue PerformVQDMULHCombine(SDNode *N, SelectionDAG &DAG) { |
| 12230 | EVT VT = N->getValueType(0); |
| 12231 | SDValue Shft; |
| 12232 | ConstantSDNode *Clamp; |
| 12233 | |
| 12234 | if (N->getOpcode() == ISD::SMIN) { |
| 12235 | Shft = N->getOperand(0); |
| 12236 | Clamp = isConstOrConstSplat(N->getOperand(1)); |
| 12237 | } else if (N->getOpcode() == ISD::VSELECT) { |
// Detect an SMIN, which for an i64 node will be a vselect/setcc, not a smin.
| 12239 | SDValue Cmp = N->getOperand(0); |
| 12240 | if (Cmp.getOpcode() != ISD::SETCC || |
| 12241 | cast<CondCodeSDNode>(Cmp.getOperand(2))->get() != ISD::SETLT || |
| 12242 | Cmp.getOperand(0) != N->getOperand(1) || |
| 12243 | Cmp.getOperand(1) != N->getOperand(2)) |
| 12244 | return SDValue(); |
| 12245 | Shft = N->getOperand(1); |
| 12246 | Clamp = isConstOrConstSplat(N->getOperand(2)); |
| 12247 | } else |
| 12248 | return SDValue(); |
| 12249 | |
| 12250 | if (!Clamp) |
| 12251 | return SDValue(); |
| 12252 | |
| 12253 | MVT ScalarType; |
| 12254 | int ShftAmt = 0; |
| 12255 | switch (Clamp->getSExtValue()) { |
| 12256 | case (1 << 7) - 1: |
| 12257 | ScalarType = MVT::i8; |
| 12258 | ShftAmt = 7; |
| 12259 | break; |
| 12260 | case (1 << 15) - 1: |
| 12261 | ScalarType = MVT::i16; |
| 12262 | ShftAmt = 15; |
| 12263 | break; |
| 12264 | case (1ULL << 31) - 1: |
| 12265 | ScalarType = MVT::i32; |
| 12266 | ShftAmt = 31; |
| 12267 | break; |
| 12268 | default: |
| 12269 | return SDValue(); |
| 12270 | } |
| 12271 | |
| 12272 | if (Shft.getOpcode() != ISD::SRA) |
| 12273 | return SDValue(); |
| 12274 | ConstantSDNode *N1 = isConstOrConstSplat(Shft.getOperand(1)); |
| 12275 | if (!N1 || N1->getSExtValue() != ShftAmt) |
| 12276 | return SDValue(); |
| 12277 | |
| 12278 | SDValue Mul = Shft.getOperand(0); |
| 12279 | if (Mul.getOpcode() != ISD::MUL) |
| 12280 | return SDValue(); |
| 12281 | |
| 12282 | SDValue Ext0 = Mul.getOperand(0); |
| 12283 | SDValue Ext1 = Mul.getOperand(1); |
| 12284 | if (Ext0.getOpcode() != ISD::SIGN_EXTEND || |
| 12285 | Ext1.getOpcode() != ISD::SIGN_EXTEND) |
| 12286 | return SDValue(); |
| 12287 | EVT VecVT = Ext0.getOperand(0).getValueType(); |
| 12288 | if (VecVT != MVT::v4i32 && VecVT != MVT::v8i16 && VecVT != MVT::v16i8) |
| 12289 | return SDValue(); |
| 12290 | if (Ext1.getOperand(0).getValueType() != VecVT || |
| 12291 | VecVT.getScalarType() != ScalarType || |
| 12292 | VT.getScalarSizeInBits() < ScalarType.getScalarSizeInBits() * 2) |
| 12293 | return SDValue(); |
| 12294 | |
| 12295 | SDLoc DL(Mul); |
| 12296 | SDValue VQDMULH = DAG.getNode(ARMISD::VQDMULH, DL, VecVT, Ext0.getOperand(0), |
| 12297 | Ext1.getOperand(0)); |
| 12298 | return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, VQDMULH); |
| 12299 | } |
| 12300 | |
| 12301 | static SDValue PerformVSELECTCombine(SDNode *N, |
| 12302 | TargetLowering::DAGCombinerInfo &DCI, |
| 12303 | const ARMSubtarget *Subtarget) { |
| 12304 | if (!Subtarget->hasMVEIntegerOps()) |
| 12305 | return SDValue(); |
| 12306 | |
| 12307 | if (SDValue V = PerformVQDMULHCombine(N, DCI.DAG)) |
| 12308 | return V; |
| 12309 | |
| 12310 | // Transforms vselect(not(cond), lhs, rhs) into vselect(cond, rhs, lhs). |
| 12311 | // |
| 12312 | // We need to re-implement this optimization here as the implementation in the |
| 12313 | // Target-Independent DAGCombiner does not handle the kind of constant we make |
| 12314 | // (it calls isConstOrConstSplat with AllowTruncation set to false - and for |
| 12315 | // good reason, allowing truncation there would break other targets). |
| 12316 | // |
| 12317 | // Currently, this is only done for MVE, as it's the only target that benefits |
| 12318 | // from this transformation (e.g. VPNOT+VPSEL becomes a single VPSEL). |
| 12319 | if (N->getOperand(0).getOpcode() != ISD::XOR) |
| 12320 | return SDValue(); |
| 12321 | SDValue XOR = N->getOperand(0); |
| 12322 | |
| 12323 | // Check if the XOR's RHS is either a 1, or a BUILD_VECTOR of 1s. |
| 12324 | // It is important to check with truncation allowed as the BUILD_VECTORs we |
| 12325 | // generate in those situations will truncate their operands. |
| 12326 | ConstantSDNode *Const = |
| 12327 | isConstOrConstSplat(XOR->getOperand(1), /*AllowUndefs*/ false, |
| 12328 | /*AllowTruncation*/ true); |
| 12329 | if (!Const || !Const->isOne()) |
| 12330 | return SDValue(); |
| 12331 | |
| 12332 | // Rewrite into vselect(cond, rhs, lhs). |
| 12333 | SDValue Cond = XOR->getOperand(0); |
| 12334 | SDValue LHS = N->getOperand(1); |
| 12335 | SDValue RHS = N->getOperand(2); |
| 12336 | EVT Type = N->getValueType(0); |
| 12337 | return DCI.DAG.getNode(ISD::VSELECT, SDLoc(N), Type, Cond, RHS, LHS); |
| 12338 | } |
| 12339 | |
| 12340 | static SDValue PerformABSCombine(SDNode *N, |
| 12341 | TargetLowering::DAGCombinerInfo &DCI, |
| 12342 | const ARMSubtarget *Subtarget) { |
| 12343 | SDValue res; |
| 12344 | SelectionDAG &DAG = DCI.DAG; |
| 12345 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 12346 | |
| 12347 | if (TLI.isOperationLegal(N->getOpcode(), N->getValueType(0))) |
| 12348 | return SDValue(); |
| 12349 | |
| 12350 | if (!TLI.expandABS(N, res, DAG)) |
| 12351 | return SDValue(); |
| 12352 | |
| 12353 | return res; |
| 12354 | } |
| 12355 | |
| 12356 | /// PerformADDECombine - Target-specific dag combine transform from |
| 12357 | /// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or |
| 12358 | /// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL |
| 12359 | static SDValue PerformADDECombine(SDNode *N, |
| 12360 | TargetLowering::DAGCombinerInfo &DCI, |
| 12361 | const ARMSubtarget *Subtarget) { |
| 12362 | // Only ARM and Thumb2 support UMLAL/SMLAL. |
| 12363 | if (Subtarget->isThumb1Only()) |
| 12364 | return PerformAddeSubeCombine(N, DCI, Subtarget); |
| 12365 | |
| 12366 | // Only perform the checks after legalize when the pattern is available. |
| 12367 | if (DCI.isBeforeLegalize()) return SDValue(); |
| 12368 | |
| 12369 | return AddCombineTo64bitUMAAL(N, DCI, Subtarget); |
| 12370 | } |
| 12371 | |
| 12372 | /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with |
| 12373 | /// operands N0 and N1. This is a helper for PerformADDCombine that is |
| 12374 | /// called with the default operands, and if that fails, with commuted |
| 12375 | /// operands. |
| 12376 | static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, |
| 12377 | TargetLowering::DAGCombinerInfo &DCI, |
                                             const ARMSubtarget *Subtarget) {
| 12379 | // Attempt to create vpadd for this add. |
| 12380 | if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget)) |
| 12381 | return Result; |
| 12382 | |
| 12383 | // Attempt to create vpaddl for this add. |
| 12384 | if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget)) |
| 12385 | return Result; |
| 12386 | if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI, |
| 12387 | Subtarget)) |
| 12388 | return Result; |
| 12389 | |
  // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
| 12391 | if (N0.getNode()->hasOneUse()) |
| 12392 | if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI)) |
| 12393 | return Result; |
| 12394 | return SDValue(); |
| 12395 | } |
| 12396 | |
| 12397 | static SDValue PerformADDVecReduce(SDNode *N, |
| 12398 | TargetLowering::DAGCombinerInfo &DCI, |
| 12399 | const ARMSubtarget *Subtarget) { |
| 12400 | if (!Subtarget->hasMVEIntegerOps() || N->getValueType(0) != MVT::i64) |
| 12401 | return SDValue(); |
| 12402 | |
| 12403 | SDValue N0 = N->getOperand(0); |
| 12404 | SDValue N1 = N->getOperand(1); |
| 12405 | |
  // We are looking for an i64 add of a VADDLVx. Due to these being i64's,
  // this will look like:
  // t1: i32,i32 = ARMISD::VADDLVs x
  // t2: i64 = build_pair t1, t1:1
  // t3: i64 = add t2, y
  // We also need to check for sext / zext and commutative adds.
| 12412 | auto MakeVecReduce = [&](unsigned Opcode, unsigned OpcodeA, SDValue NA, |
| 12413 | SDValue NB) { |
| 12414 | if (NB->getOpcode() != ISD::BUILD_PAIR) |
| 12415 | return SDValue(); |
| 12416 | SDValue VecRed = NB->getOperand(0); |
| 12417 | if (VecRed->getOpcode() != Opcode || VecRed.getResNo() != 0 || |
| 12418 | NB->getOperand(1) != SDValue(VecRed.getNode(), 1)) |
| 12419 | return SDValue(); |
| 12420 | |
| 12421 | SDLoc dl(N); |
| 12422 | SmallVector<SDValue, 4> Ops; |
| 12423 | Ops.push_back(DCI.DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA, |
| 12424 | DCI.DAG.getConstant(0, dl, MVT::i32))); |
| 12425 | Ops.push_back(DCI.DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA, |
| 12426 | DCI.DAG.getConstant(1, dl, MVT::i32))); |
| 12427 | for (unsigned i = 0, e = VecRed.getNumOperands(); i < e; i++) |
| 12428 | Ops.push_back(VecRed->getOperand(i)); |
| 12429 | SDValue Red = DCI.DAG.getNode(OpcodeA, dl, |
| 12430 | DCI.DAG.getVTList({MVT::i32, MVT::i32}), Ops); |
| 12431 | return DCI.DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Red, |
| 12432 | SDValue(Red.getNode(), 1)); |
| 12433 | }; |
| 12434 | |
| 12435 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N0, N1)) |
| 12436 | return M; |
| 12437 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N0, N1)) |
| 12438 | return M; |
| 12439 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N1, N0)) |
| 12440 | return M; |
| 12441 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N1, N0)) |
| 12442 | return M; |
| 12443 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N0, N1)) |
| 12444 | return M; |
| 12445 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N0, N1)) |
| 12446 | return M; |
| 12447 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N1, N0)) |
| 12448 | return M; |
| 12449 | if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N1, N0)) |
| 12450 | return M; |
| 12451 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N0, N1)) |
| 12452 | return M; |
| 12453 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N0, N1)) |
| 12454 | return M; |
| 12455 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N1, N0)) |
| 12456 | return M; |
| 12457 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N1, N0)) |
| 12458 | return M; |
| 12459 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N0, N1)) |
| 12460 | return M; |
| 12461 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N0, N1)) |
| 12462 | return M; |
| 12463 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVps, ARMISD::VMLALVAps, N1, N0)) |
| 12464 | return M; |
| 12465 | if (SDValue M = MakeVecReduce(ARMISD::VMLALVpu, ARMISD::VMLALVApu, N1, N0)) |
| 12466 | return M; |
| 12467 | return SDValue(); |
| 12468 | } |
| 12469 | |
| 12470 | bool |
| 12471 | ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N, |
| 12472 | CombineLevel Level) const { |
| 12473 | if (Level == BeforeLegalizeTypes) |
| 12474 | return true; |
| 12475 | |
| 12476 | if (N->getOpcode() != ISD::SHL) |
| 12477 | return true; |
| 12478 | |
| 12479 | if (Subtarget->isThumb1Only()) { |
| 12480 | // Avoid making expensive immediates by commuting shifts. (This logic |
| 12481 | // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted |
| 12482 | // for free.) |
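    // For example, (shl (add x, 255), 2) is left alone: 255 encodes directly,
    // while commuting would create (add (shl x, 2), 1020), and 1020 needs a
    // separate mov immediate on Thumb1 (illustrative).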
| 12483 | if (N->getOpcode() != ISD::SHL) |
| 12484 | return true; |
| 12485 | SDValue N1 = N->getOperand(0); |
| 12486 | if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND && |
| 12487 | N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR) |
| 12488 | return true; |
| 12489 | if (auto *Const = dyn_cast<ConstantSDNode>(N1->getOperand(1))) { |
| 12490 | if (Const->getAPIntValue().ult(256)) |
| 12491 | return false; |
| 12492 | if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(0) && |
| 12493 | Const->getAPIntValue().sgt(-256)) |
| 12494 | return false; |
| 12495 | } |
| 12496 | return true; |
| 12497 | } |
| 12498 | |
| 12499 | // Turn off commute-with-shift transform after legalization, so it doesn't |
| 12500 | // conflict with PerformSHLSimplify. (We could try to detect when |
| 12501 | // PerformSHLSimplify would trigger more precisely, but it isn't |
| 12502 | // really necessary.) |
| 12503 | return false; |
| 12504 | } |
| 12505 | |
| 12506 | bool ARMTargetLowering::shouldFoldConstantShiftPairToMask( |
| 12507 | const SDNode *N, CombineLevel Level) const { |
| 12508 | if (!Subtarget->isThumb1Only()) |
| 12509 | return true; |
| 12510 | |
| 12511 | if (Level == BeforeLegalizeTypes) |
| 12512 | return true; |
| 12513 | |
| 12514 | return false; |
| 12515 | } |
| 12516 | |
| 12517 | bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const { |
| 12518 | if (!Subtarget->hasNEON()) { |
| 12519 | if (Subtarget->isThumb1Only()) |
| 12520 | return VT.getScalarSizeInBits() <= 32; |
| 12521 | return true; |
| 12522 | } |
| 12523 | return VT.isScalarInteger(); |
| 12524 | } |
| 12525 | |
| 12526 | static SDValue PerformSHLSimplify(SDNode *N, |
| 12527 | TargetLowering::DAGCombinerInfo &DCI, |
| 12528 | const ARMSubtarget *ST) { |
| 12529 | // Allow the generic combiner to identify potential bswaps. |
| 12530 | if (DCI.isBeforeLegalize()) |
| 12531 | return SDValue(); |
| 12532 | |
  // DAG combiner will fold:
  // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
  // Other code patterns that can also be modified have the following form:
  // b + ((a << 1) | 510)
  // b + ((a << 1) & 510)
  // b + ((a << 1) ^ 510)
  // b + ((a << 1) + 510)

  // Many instructions can perform the shift for free, but it requires both
  // the operands to be registers. If c1 << c2 is too large, a mov immediate
  // instruction will be needed. So, unfold back to the original pattern if:
  // - c1 and c2 are small enough that they don't require mov imms.
  // - the user(s) of the node can perform a shl.
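  //
  // For example (illustrative): (add (shl x, 3), 0x780) is unfolded back to
  // (shl (add x, 0xf0), 3), since 0xf0 and 3 are cheap immediates while
  // 0x780 may need a separate mov immediate.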
| 12547 | |
| 12548 | // No shifted operands for 16-bit instructions. |
| 12549 | if (ST->isThumb() && ST->isThumb1Only()) |
| 12550 | return SDValue(); |
| 12551 | |
| 12552 | // Check that all the users could perform the shl themselves. |
| 12553 | for (auto U : N->uses()) { |
    switch (U->getOpcode()) {
| 12555 | default: |
| 12556 | return SDValue(); |
| 12557 | case ISD::SUB: |
| 12558 | case ISD::ADD: |
| 12559 | case ISD::AND: |
| 12560 | case ISD::OR: |
| 12561 | case ISD::XOR: |
| 12562 | case ISD::SETCC: |
| 12563 | case ARMISD::CMP: |
| 12564 | // Check that the user isn't already using a constant because there |
| 12565 | // aren't any instructions that support an immediate operand and a |
| 12566 | // shifted operand. |
| 12567 | if (isa<ConstantSDNode>(U->getOperand(0)) || |
| 12568 | isa<ConstantSDNode>(U->getOperand(1))) |
| 12569 | return SDValue(); |
| 12570 | |
| 12571 | // Check that it's not already using a shift. |
| 12572 | if (U->getOperand(0).getOpcode() == ISD::SHL || |
| 12573 | U->getOperand(1).getOpcode() == ISD::SHL) |
| 12574 | return SDValue(); |
| 12575 | break; |
| 12576 | } |
| 12577 | } |
| 12578 | |
| 12579 | if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR && |
| 12580 | N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND) |
| 12581 | return SDValue(); |
| 12582 | |
| 12583 | if (N->getOperand(0).getOpcode() != ISD::SHL) |
| 12584 | return SDValue(); |
| 12585 | |
| 12586 | SDValue SHL = N->getOperand(0); |
| 12587 | |
| 12588 | auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| 12589 | auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1)); |
| 12590 | if (!C1ShlC2 || !C2) |
| 12591 | return SDValue(); |
| 12592 | |
| 12593 | APInt C2Int = C2->getAPIntValue(); |
| 12594 | APInt C1Int = C1ShlC2->getAPIntValue(); |
| 12595 | |
| 12596 | // Check that performing a lshr will not lose any information. |
| 12597 | APInt Mask = APInt::getHighBitsSet(C2Int.getBitWidth(), |
| 12598 | C2Int.getBitWidth() - C2->getZExtValue()); |
| 12599 | if ((C1Int & Mask) != C1Int) |
| 12600 | return SDValue(); |
| 12601 | |
| 12602 | // Shift the first constant. |
| 12603 | C1Int.lshrInPlace(C2Int); |
| 12604 | |
| 12605 | // The immediates are encoded as an 8-bit value that can be rotated. |
| 12606 | auto LargeImm = [](const APInt &Imm) { |
| 12607 | unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros(); |
| 12608 | return Imm.getBitWidth() - Zeros > 8; |
| 12609 | }; |
| 12610 | |
| 12611 | if (LargeImm(C1Int) || LargeImm(C2Int)) |
| 12612 | return SDValue(); |
| 12613 | |
| 12614 | SelectionDAG &DAG = DCI.DAG; |
| 12615 | SDLoc dl(N); |
| 12616 | SDValue X = SHL.getOperand(0); |
| 12617 | SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X, |
| 12618 | DAG.getConstant(C1Int, dl, MVT::i32)); |
| 12619 | // Shift left to compensate for the lshr of C1Int. |
| 12620 | SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1)); |
| 12621 | |
  LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
             SHL.dump(); N->dump());
  LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
| 12625 | return Res; |
| 12626 | } |
| 12627 | |
| 12628 | |
| 12629 | /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. |
| 12630 | /// |
| 12631 | static SDValue PerformADDCombine(SDNode *N, |
| 12632 | TargetLowering::DAGCombinerInfo &DCI, |
| 12633 | const ARMSubtarget *Subtarget) { |
| 12634 | SDValue N0 = N->getOperand(0); |
| 12635 | SDValue N1 = N->getOperand(1); |
| 12636 | |
| 12637 | // Only works one way, because it needs an immediate operand. |
| 12638 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) |
| 12639 | return Result; |
| 12640 | |
| 12641 | if (SDValue Result = PerformADDVecReduce(N, DCI, Subtarget)) |
| 12642 | return Result; |
| 12643 | |
| 12644 | // First try with the default operand order. |
| 12645 | if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget)) |
| 12646 | return Result; |
| 12647 | |
| 12648 | // If that didn't work, try again with the operands commuted. |
| 12649 | return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); |
| 12650 | } |
| 12651 | |
| 12652 | /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. |
| 12653 | /// |
| 12654 | static SDValue PerformSUBCombine(SDNode *N, |
| 12655 | TargetLowering::DAGCombinerInfo &DCI, |
| 12656 | const ARMSubtarget *Subtarget) { |
| 12657 | SDValue N0 = N->getOperand(0); |
| 12658 | SDValue N1 = N->getOperand(1); |
| 12659 | |
  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
| 12661 | if (N1.getNode()->hasOneUse()) |
| 12662 | if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI)) |
| 12663 | return Result; |
| 12664 | |
| 12665 | if (!Subtarget->hasMVEIntegerOps() || !N->getValueType(0).isVector()) |
| 12666 | return SDValue(); |
| 12667 | |
| 12668 | // Fold (sub (ARMvmovImm 0), (ARMvdup x)) -> (ARMvdup (sub 0, x)) |
| 12669 | // so that we can readily pattern match more mve instructions which can use |
| 12670 | // a scalar operand. |
| 12671 | SDValue VDup = N->getOperand(1); |
| 12672 | if (VDup->getOpcode() != ARMISD::VDUP) |
| 12673 | return SDValue(); |
| 12674 | |
| 12675 | SDValue VMov = N->getOperand(0); |
| 12676 | if (VMov->getOpcode() == ISD::BITCAST) |
| 12677 | VMov = VMov->getOperand(0); |
| 12678 | |
| 12679 | if (VMov->getOpcode() != ARMISD::VMOVIMM || !isZeroVector(VMov)) |
| 12680 | return SDValue(); |
| 12681 | |
| 12682 | SDLoc dl(N); |
| 12683 | SDValue Negate = DCI.DAG.getNode(ISD::SUB, dl, MVT::i32, |
| 12684 | DCI.DAG.getConstant(0, dl, MVT::i32), |
| 12685 | VDup->getOperand(0)); |
| 12686 | return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), Negate); |
| 12687 | } |
| 12688 | |
| 12689 | /// PerformVMULCombine |
| 12690 | /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the |
| 12691 | /// special multiplier accumulator forwarding. |
| 12692 | /// vmul d3, d0, d2 |
| 12693 | /// vmla d3, d1, d2 |
| 12694 | /// is faster than |
| 12695 | /// vadd d3, d0, d1 |
| 12696 | /// vmul d3, d3, d2 |
| 12697 | // However, for (A + B) * (A + B), |
| 12698 | // vadd d2, d0, d1 |
| 12699 | // vmul d3, d0, d2 |
| 12700 | // vmla d3, d1, d2 |
| 12701 | // is slower than |
| 12702 | // vadd d2, d0, d1 |
| 12703 | // vmul d3, d2, d2 |
| 12704 | static SDValue PerformVMULCombine(SDNode *N, |
| 12705 | TargetLowering::DAGCombinerInfo &DCI, |
| 12706 | const ARMSubtarget *Subtarget) { |
| 12707 | if (!Subtarget->hasVMLxForwarding()) |
| 12708 | return SDValue(); |
| 12709 | |
| 12710 | SelectionDAG &DAG = DCI.DAG; |
| 12711 | SDValue N0 = N->getOperand(0); |
| 12712 | SDValue N1 = N->getOperand(1); |
| 12713 | unsigned Opcode = N0.getOpcode(); |
| 12714 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
| 12715 | Opcode != ISD::FADD && Opcode != ISD::FSUB) { |
| 12716 | Opcode = N1.getOpcode(); |
| 12717 | if (Opcode != ISD::ADD && Opcode != ISD::SUB && |
| 12718 | Opcode != ISD::FADD && Opcode != ISD::FSUB) |
| 12719 | return SDValue(); |
| 12720 | std::swap(N0, N1); |
| 12721 | } |
| 12722 | |
| 12723 | if (N0 == N1) |
| 12724 | return SDValue(); |
| 12725 | |
| 12726 | EVT VT = N->getValueType(0); |
| 12727 | SDLoc DL(N); |
| 12728 | SDValue N00 = N0->getOperand(0); |
| 12729 | SDValue N01 = N0->getOperand(1); |
| 12730 | return DAG.getNode(Opcode, DL, VT, |
| 12731 | DAG.getNode(ISD::MUL, DL, VT, N00, N1), |
| 12732 | DAG.getNode(ISD::MUL, DL, VT, N01, N1)); |
| 12733 | } |
| 12734 | |
| 12735 | static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG, |
| 12736 | const ARMSubtarget *Subtarget) { |
| 12737 | EVT VT = N->getValueType(0); |
| 12738 | if (VT != MVT::v2i64) |
| 12739 | return SDValue(); |
| 12740 | |
| 12741 | SDValue N0 = N->getOperand(0); |
| 12742 | SDValue N1 = N->getOperand(1); |
| 12743 | |
| 12744 | auto IsSignExt = [&](SDValue Op) { |
| 12745 | if (Op->getOpcode() != ISD::SIGN_EXTEND_INREG) |
| 12746 | return SDValue(); |
| 12747 | EVT VT = cast<VTSDNode>(Op->getOperand(1))->getVT(); |
| 12748 | if (VT.getScalarSizeInBits() == 32) |
| 12749 | return Op->getOperand(0); |
| 12750 | return SDValue(); |
| 12751 | }; |
| 12752 | auto IsZeroExt = [&](SDValue Op) { |
    // Zero extends are a little more awkward. At the point we are matching
    // this, we are looking for an AND with a (-1, 0, -1, 0) buildvector mask.
    // That might be before or after a bitcast, depending on how the AND is
    // placed. Because this has to look through bitcasts, it is currently only
    // supported on little-endian targets.
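    // E.g. on LE, a v2i64 value whose i64 lanes are zero-extended 32-bit
    // values appears as an AND whose mask, viewed as a v4i32 build_vector,
    // is (-1, 0, -1, 0): it keeps the low 32 bits of each i64 lane
    // (illustrative).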
| 12758 | if (!Subtarget->isLittle()) |
| 12759 | return SDValue(); |
| 12760 | |
| 12761 | SDValue And = Op; |
| 12762 | if (And->getOpcode() == ISD::BITCAST) |
| 12763 | And = And->getOperand(0); |
| 12764 | if (And->getOpcode() != ISD::AND) |
| 12765 | return SDValue(); |
| 12766 | SDValue Mask = And->getOperand(1); |
| 12767 | if (Mask->getOpcode() == ISD::BITCAST) |
| 12768 | Mask = Mask->getOperand(0); |
| 12769 | |
| 12770 | if (Mask->getOpcode() != ISD::BUILD_VECTOR || |
| 12771 | Mask.getValueType() != MVT::v4i32) |
| 12772 | return SDValue(); |
| 12773 | if (isAllOnesConstant(Mask->getOperand(0)) && |
| 12774 | isNullConstant(Mask->getOperand(1)) && |
| 12775 | isAllOnesConstant(Mask->getOperand(2)) && |
| 12776 | isNullConstant(Mask->getOperand(3))) |
| 12777 | return And->getOperand(0); |
| 12778 | return SDValue(); |
| 12779 | }; |
| 12780 | |
| 12781 | SDLoc dl(N); |
| 12782 | if (SDValue Op0 = IsSignExt(N0)) { |
| 12783 | if (SDValue Op1 = IsSignExt(N1)) { |
| 12784 | SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0); |
| 12785 | SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1); |
| 12786 | return DAG.getNode(ARMISD::VMULLs, dl, VT, New0a, New1a); |
| 12787 | } |
| 12788 | } |
| 12789 | if (SDValue Op0 = IsZeroExt(N0)) { |
| 12790 | if (SDValue Op1 = IsZeroExt(N1)) { |
| 12791 | SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0); |
| 12792 | SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1); |
| 12793 | return DAG.getNode(ARMISD::VMULLu, dl, VT, New0a, New1a); |
| 12794 | } |
| 12795 | } |
| 12796 | |
| 12797 | return SDValue(); |
| 12798 | } |
| 12799 | |
| 12800 | static SDValue PerformMULCombine(SDNode *N, |
| 12801 | TargetLowering::DAGCombinerInfo &DCI, |
| 12802 | const ARMSubtarget *Subtarget) { |
| 12803 | SelectionDAG &DAG = DCI.DAG; |
| 12804 | |
| 12805 | EVT VT = N->getValueType(0); |
| 12806 | if (Subtarget->hasMVEIntegerOps() && VT == MVT::v2i64) |
| 12807 | return PerformMVEVMULLCombine(N, DAG, Subtarget); |
| 12808 | |
| 12809 | if (Subtarget->isThumb1Only()) |
| 12810 | return SDValue(); |
| 12811 | |
| 12812 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 12813 | return SDValue(); |
| 12814 | |
| 12815 | if (VT.is64BitVector() || VT.is128BitVector()) |
| 12816 | return PerformVMULCombine(N, DCI, Subtarget); |
| 12817 | if (VT != MVT::i32) |
| 12818 | return SDValue(); |
| 12819 | |
| 12820 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| 12821 | if (!C) |
| 12822 | return SDValue(); |
| 12823 | |
| 12824 | int64_t MulAmt = C->getSExtValue(); |
| 12825 | unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt); |
| 12826 | |
| 12827 | ShiftAmt = ShiftAmt & (32 - 1); |
| 12828 | SDValue V = N->getOperand(0); |
| 12829 | SDLoc DL(N); |
| 12830 | |
| 12831 | SDValue Res; |
| 12832 | MulAmt >>= ShiftAmt; |
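  // Worked example (illustrative): for (mul x, 24), ShiftAmt is 3 and MulAmt
  // becomes 3 = 2^1 + 1, so the result is (shl (add x, (shl x, 1)), 3).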
| 12833 | |
| 12834 | if (MulAmt >= 0) { |
| 12835 | if (isPowerOf2_32(MulAmt - 1)) { |
| 12836 | // (mul x, 2^N + 1) => (add (shl x, N), x) |
| 12837 | Res = DAG.getNode(ISD::ADD, DL, VT, |
| 12838 | V, |
| 12839 | DAG.getNode(ISD::SHL, DL, VT, |
| 12840 | V, |
| 12841 | DAG.getConstant(Log2_32(MulAmt - 1), DL, |
| 12842 | MVT::i32))); |
| 12843 | } else if (isPowerOf2_32(MulAmt + 1)) { |
| 12844 | // (mul x, 2^N - 1) => (sub (shl x, N), x) |
| 12845 | Res = DAG.getNode(ISD::SUB, DL, VT, |
| 12846 | DAG.getNode(ISD::SHL, DL, VT, |
| 12847 | V, |
| 12848 | DAG.getConstant(Log2_32(MulAmt + 1), DL, |
| 12849 | MVT::i32)), |
| 12850 | V); |
| 12851 | } else |
| 12852 | return SDValue(); |
| 12853 | } else { |
| 12854 | uint64_t MulAmtAbs = -MulAmt; |
| 12855 | if (isPowerOf2_32(MulAmtAbs + 1)) { |
| 12856 | // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) |
| 12857 | Res = DAG.getNode(ISD::SUB, DL, VT, |
| 12858 | V, |
| 12859 | DAG.getNode(ISD::SHL, DL, VT, |
| 12860 | V, |
| 12861 | DAG.getConstant(Log2_32(MulAmtAbs + 1), DL, |
| 12862 | MVT::i32))); |
| 12863 | } else if (isPowerOf2_32(MulAmtAbs - 1)) { |
| 12864 | // (mul x, -(2^N + 1)) => - (add (shl x, N), x) |
| 12865 | Res = DAG.getNode(ISD::ADD, DL, VT, |
| 12866 | V, |
| 12867 | DAG.getNode(ISD::SHL, DL, VT, |
| 12868 | V, |
| 12869 | DAG.getConstant(Log2_32(MulAmtAbs - 1), DL, |
| 12870 | MVT::i32))); |
| 12871 | Res = DAG.getNode(ISD::SUB, DL, VT, |
| 12872 | DAG.getConstant(0, DL, MVT::i32), Res); |
| 12873 | } else |
| 12874 | return SDValue(); |
| 12875 | } |
| 12876 | |
| 12877 | if (ShiftAmt != 0) |
| 12878 | Res = DAG.getNode(ISD::SHL, DL, VT, |
| 12879 | Res, DAG.getConstant(ShiftAmt, DL, MVT::i32)); |
| 12880 | |
| 12881 | // Do not add new nodes to DAG combiner worklist. |
| 12882 | DCI.CombineTo(N, Res, false); |
| 12883 | return SDValue(); |
| 12884 | } |
| 12885 | |
| 12886 | static SDValue CombineANDShift(SDNode *N, |
| 12887 | TargetLowering::DAGCombinerInfo &DCI, |
| 12888 | const ARMSubtarget *Subtarget) { |
| 12889 | // Allow DAGCombine to pattern-match before we touch the canonical form. |
| 12890 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 12891 | return SDValue(); |
| 12892 | |
| 12893 | if (N->getValueType(0) != MVT::i32) |
| 12894 | return SDValue(); |
| 12895 | |
| 12896 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| 12897 | if (!N1C) |
| 12898 | return SDValue(); |
| 12899 | |
| 12900 | uint32_t C1 = (uint32_t)N1C->getZExtValue(); |
| 12901 | // Don't transform uxtb/uxth. |
| 12902 | if (C1 == 255 || C1 == 65535) |
| 12903 | return SDValue(); |
| 12904 | |
| 12905 | SDNode *N0 = N->getOperand(0).getNode(); |
| 12906 | if (!N0->hasOneUse()) |
| 12907 | return SDValue(); |
| 12908 | |
| 12909 | if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL) |
| 12910 | return SDValue(); |
| 12911 | |
| 12912 | bool LeftShift = N0->getOpcode() == ISD::SHL; |
| 12913 | |
| 12914 | ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1)); |
| 12915 | if (!N01C) |
| 12916 | return SDValue(); |
| 12917 | |
| 12918 | uint32_t C2 = (uint32_t)N01C->getZExtValue(); |
| 12919 | if (!C2 || C2 >= 32) |
| 12920 | return SDValue(); |
| 12921 | |
| 12922 | // Clear irrelevant bits in the mask. |
| 12923 | if (LeftShift) |
| 12924 | C1 &= (-1U << C2); |
| 12925 | else |
| 12926 | C1 &= (-1U >> C2); |
| 12927 | |
| 12928 | SelectionDAG &DAG = DCI.DAG; |
| 12929 | SDLoc DL(N); |
| 12930 | |
| 12931 | // We have a pattern of the form "(and (shl x, c2) c1)" or |
| 12932 | // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to |
| 12933 | // transform to a pair of shifts, to save materializing c1. |
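  // For instance (illustrative): (and (srl x, 1), 0x00ffffff) becomes
  // (srl (shl x, 7), 8), avoiding the materialization of the 24-bit mask.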
| 12934 | |
| 12935 | // First pattern: right shift, then mask off leading bits. |
| 12936 | // FIXME: Use demanded bits? |
| 12937 | if (!LeftShift && isMask_32(C1)) { |
| 12938 | uint32_t C3 = countLeadingZeros(C1); |
| 12939 | if (C2 < C3) { |
| 12940 | SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0), |
| 12941 | DAG.getConstant(C3 - C2, DL, MVT::i32)); |
| 12942 | return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL, |
| 12943 | DAG.getConstant(C3, DL, MVT::i32)); |
| 12944 | } |
| 12945 | } |
| 12946 | |
| 12947 | // First pattern, reversed: left shift, then mask off trailing bits. |
| 12948 | if (LeftShift && isMask_32(~C1)) { |
| 12949 | uint32_t C3 = countTrailingZeros(C1); |
| 12950 | if (C2 < C3) { |
| 12951 | SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0), |
| 12952 | DAG.getConstant(C3 - C2, DL, MVT::i32)); |
| 12953 | return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL, |
| 12954 | DAG.getConstant(C3, DL, MVT::i32)); |
| 12955 | } |
| 12956 | } |
| 12957 | |
| 12958 | // Second pattern: left shift, then mask off leading bits. |
| 12959 | // FIXME: Use demanded bits? |
| 12960 | if (LeftShift && isShiftedMask_32(C1)) { |
| 12961 | uint32_t Trailing = countTrailingZeros(C1); |
| 12962 | uint32_t C3 = countLeadingZeros(C1); |
| 12963 | if (Trailing == C2 && C2 + C3 < 32) { |
| 12964 | SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0), |
| 12965 | DAG.getConstant(C2 + C3, DL, MVT::i32)); |
| 12966 | return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL, |
| 12967 | DAG.getConstant(C3, DL, MVT::i32)); |
| 12968 | } |
| 12969 | } |
| 12970 | |
| 12971 | // Second pattern, reversed: right shift, then mask off trailing bits. |
| 12972 | // FIXME: Handle other patterns of known/demanded bits. |
| 12973 | if (!LeftShift && isShiftedMask_32(C1)) { |
| 12974 | uint32_t Leading = countLeadingZeros(C1); |
| 12975 | uint32_t C3 = countTrailingZeros(C1); |
| 12976 | if (Leading == C2 && C2 + C3 < 32) { |
| 12977 | SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0), |
| 12978 | DAG.getConstant(C2 + C3, DL, MVT::i32)); |
| 12979 | return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL, |
| 12980 | DAG.getConstant(C3, DL, MVT::i32)); |
| 12981 | } |
| 12982 | } |
| 12983 | |
| 12984 | // FIXME: Transform "(and (shl x, c2) c1)" -> |
| 12985 | // "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than |
| 12986 | // c1. |
| 12987 | return SDValue(); |
| 12988 | } |
| 12989 | |
| 12990 | static SDValue PerformANDCombine(SDNode *N, |
| 12991 | TargetLowering::DAGCombinerInfo &DCI, |
| 12992 | const ARMSubtarget *Subtarget) { |
| 12993 | // Attempt to use immediate-form VBIC |
| 12994 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); |
| 12995 | SDLoc dl(N); |
| 12996 | EVT VT = N->getValueType(0); |
| 12997 | SelectionDAG &DAG = DCI.DAG; |
| 12998 | |
| 12999 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT) || VT == MVT::v4i1 || |
| 13000 | VT == MVT::v8i1 || VT == MVT::v16i1) |
| 13001 | return SDValue(); |
| 13002 | |
| 13003 | APInt SplatBits, SplatUndef; |
| 13004 | unsigned SplatBitSize; |
| 13005 | bool HasAnyUndefs; |
| 13006 | if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && |
| 13007 | BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 13008 | if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || |
| 13009 | SplatBitSize == 64) { |
| 13010 | EVT VbicVT; |
| 13011 | SDValue Val = isVMOVModifiedImm((~SplatBits).getZExtValue(), |
| 13012 | SplatUndef.getZExtValue(), SplatBitSize, |
| 13013 | DAG, dl, VbicVT, VT, OtherModImm); |
| 13014 | if (Val.getNode()) { |
| 13015 | SDValue Input = |
| 13016 | DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); |
| 13017 | SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); |
| 13018 | return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); |
| 13019 | } |
| 13020 | } |
| 13021 | } |
| 13022 | |
| 13023 | if (!Subtarget->isThumb1Only()) { |
    // fold (and (select cc, -1, c), x) -> (select cc, x, (and x, c))
| 13025 | if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI)) |
| 13026 | return Result; |
| 13027 | |
| 13028 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) |
| 13029 | return Result; |
| 13030 | } |
| 13031 | |
| 13032 | if (Subtarget->isThumb1Only()) |
| 13033 | if (SDValue Result = CombineANDShift(N, DCI, Subtarget)) |
| 13034 | return Result; |
| 13035 | |
| 13036 | return SDValue(); |
| 13037 | } |
| 13038 | |
| 13039 | // Try combining OR nodes to SMULWB, SMULWT. |
| 13040 | static SDValue PerformORCombineToSMULWBT(SDNode *OR, |
| 13041 | TargetLowering::DAGCombinerInfo &DCI, |
| 13042 | const ARMSubtarget *Subtarget) { |
| 13043 | if (!Subtarget->hasV6Ops() || |
| 13044 | (Subtarget->isThumb() && |
| 13045 | (!Subtarget->hasThumb2() || !Subtarget->hasDSP()))) |
| 13046 | return SDValue(); |
| 13047 | |
| 13048 | SDValue SRL = OR->getOperand(0); |
| 13049 | SDValue SHL = OR->getOperand(1); |
| 13050 | |
| 13051 | if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) { |
| 13052 | SRL = OR->getOperand(1); |
| 13053 | SHL = OR->getOperand(0); |
| 13054 | } |
| 13055 | if (!isSRL16(SRL) || !isSHL16(SHL)) |
| 13056 | return SDValue(); |
| 13057 | |
| 13058 | // The first operands to the shifts need to be the two results from the |
| 13059 | // same smul_lohi node. |
| 13060 | if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) || |
| 13061 | SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI) |
| 13062 | return SDValue(); |
| 13063 | |
| 13064 | SDNode *SMULLOHI = SRL.getOperand(0).getNode(); |
| 13065 | if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) || |
| 13066 | SHL.getOperand(0) != SDValue(SMULLOHI, 1)) |
| 13067 | return SDValue(); |
| 13068 | |
  // Now we have:
  // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16))
  // For SMUL[B|T] smul_lohi will take a 32-bit and a 16-bit argument.
  // For SMULWB the 16-bit value will be sign-extended somehow.
  // For SMULWT only the SRA is required.
  // Check both sides of SMUL_LOHI.
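  // Note that the OR above reconstructs bits [47:16] of the full 64-bit
  // product, which is exactly what SMULWB/SMULWT compute (illustrative).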
| 13075 | SDValue OpS16 = SMULLOHI->getOperand(0); |
| 13076 | SDValue OpS32 = SMULLOHI->getOperand(1); |
| 13077 | |
| 13078 | SelectionDAG &DAG = DCI.DAG; |
| 13079 | if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) { |
| 13080 | OpS16 = OpS32; |
| 13081 | OpS32 = SMULLOHI->getOperand(0); |
| 13082 | } |
| 13083 | |
| 13084 | SDLoc dl(OR); |
| 13085 | unsigned Opcode = 0; |
| 13086 | if (isS16(OpS16, DAG)) |
| 13087 | Opcode = ARMISD::SMULWB; |
| 13088 | else if (isSRA16(OpS16)) { |
| 13089 | Opcode = ARMISD::SMULWT; |
| 13090 | OpS16 = OpS16->getOperand(0); |
| 13091 | } |
| 13092 | else |
| 13093 | return SDValue(); |
| 13094 | |
| 13095 | SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16); |
| 13096 | DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res); |
| 13097 | return SDValue(OR, 0); |
| 13098 | } |
| 13099 | |
| 13100 | static SDValue PerformORCombineToBFI(SDNode *N, |
| 13101 | TargetLowering::DAGCombinerInfo &DCI, |
| 13102 | const ARMSubtarget *Subtarget) { |
| 13103 | // BFI is only available on V6T2+ |
| 13104 | if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) |
| 13105 | return SDValue(); |
| 13106 | |
| 13107 | EVT VT = N->getValueType(0); |
| 13108 | SDValue N0 = N->getOperand(0); |
| 13109 | SDValue N1 = N->getOperand(1); |
| 13110 | SelectionDAG &DAG = DCI.DAG; |
| 13111 | SDLoc DL(N); |
| 13112 | // 1) or (and A, mask), val => ARMbfi A, val, mask |
| 13113 | // iff (val & mask) == val |
| 13114 | // |
| 13115 | // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
| 13116 | // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) |
| 13117 | // && mask == ~mask2 |
| 13118 | // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) |
| 13119 | // && ~mask == mask2 |
| 13120 | // (i.e., copy a bitfield value into another bitfield of the same width) |
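  //
  // Worked example of (1) (illustrative): with mask = 0xffff00ff and
  // val = 0x1200, we emit ARMbfi A, 0x12, 0xffff00ff, inserting 0x12 into
  // bits [15:8] of A.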
| 13121 | |
| 13122 | if (VT != MVT::i32) |
| 13123 | return SDValue(); |
| 13124 | |
| 13125 | SDValue N00 = N0.getOperand(0); |
| 13126 | |
| 13127 | // The value and the mask need to be constants so we can verify this is |
| 13128 | // actually a bitfield set. If the mask is 0xffff, we can do better |
| 13129 | // via a movt instruction, so don't use BFI in that case. |
| 13130 | SDValue MaskOp = N0.getOperand(1); |
| 13131 | ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); |
| 13132 | if (!MaskC) |
| 13133 | return SDValue(); |
| 13134 | unsigned Mask = MaskC->getZExtValue(); |
| 13135 | if (Mask == 0xffff) |
| 13136 | return SDValue(); |
| 13137 | SDValue Res; |
| 13138 | // Case (1): or (and A, mask), val => ARMbfi A, val, mask |
| 13139 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); |
| 13140 | if (N1C) { |
| 13141 | unsigned Val = N1C->getZExtValue(); |
| 13142 | if ((Val & ~Mask) != Val) |
| 13143 | return SDValue(); |
| 13144 | |
| 13145 | if (ARM::isBitFieldInvertedMask(Mask)) { |
| 13146 | Val >>= countTrailingZeros(~Mask); |
| 13147 | |
| 13148 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, |
| 13149 | DAG.getConstant(Val, DL, MVT::i32), |
| 13150 | DAG.getConstant(Mask, DL, MVT::i32)); |
| 13151 | |
| 13152 | DCI.CombineTo(N, Res, false); |
      // Return value from the original node to inform the combiner that N is
      // now dead.
| 13155 | return SDValue(N, 0); |
| 13156 | } |
| 13157 | } else if (N1.getOpcode() == ISD::AND) { |
| 13158 | // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask |
| 13159 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); |
| 13160 | if (!N11C) |
| 13161 | return SDValue(); |
| 13162 | unsigned Mask2 = N11C->getZExtValue(); |
| 13163 | |
    // Mask and ~Mask2 (or the reverse) must be equivalent for the BFI pattern
    // to match.
| 13166 | if (ARM::isBitFieldInvertedMask(Mask) && |
| 13167 | (Mask == ~Mask2)) { |
| 13168 | // The pack halfword instruction works better for masks that fit it, |
| 13169 | // so use that when it's available. |
| 13170 | if (Subtarget->hasDSP() && |
| 13171 | (Mask == 0xffff || Mask == 0xffff0000)) |
| 13172 | return SDValue(); |
| 13173 | // 2a |
| 13174 | unsigned amt = countTrailingZeros(Mask2); |
| 13175 | Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), |
| 13176 | DAG.getConstant(amt, DL, MVT::i32)); |
| 13177 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, |
| 13178 | DAG.getConstant(Mask, DL, MVT::i32)); |
| 13179 | DCI.CombineTo(N, Res, false); |
      // Return value from the original node to inform the combiner that N is
      // now dead.
| 13182 | return SDValue(N, 0); |
| 13183 | } else if (ARM::isBitFieldInvertedMask(~Mask) && |
| 13184 | (~Mask == Mask2)) { |
| 13185 | // The pack halfword instruction works better for masks that fit it, |
| 13186 | // so use that when it's available. |
| 13187 | if (Subtarget->hasDSP() && |
| 13188 | (Mask2 == 0xffff || Mask2 == 0xffff0000)) |
| 13189 | return SDValue(); |
| 13190 | // 2b |
| 13191 | unsigned lsb = countTrailingZeros(Mask); |
| 13192 | Res = DAG.getNode(ISD::SRL, DL, VT, N00, |
| 13193 | DAG.getConstant(lsb, DL, MVT::i32)); |
| 13194 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, |
| 13195 | DAG.getConstant(Mask2, DL, MVT::i32)); |
| 13196 | DCI.CombineTo(N, Res, false); |
      // Return value from the original node to inform the combiner that N is
      // now dead.
| 13199 | return SDValue(N, 0); |
| 13200 | } |
| 13201 | } |
| 13202 | |
| 13203 | if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && |
| 13204 | N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && |
| 13205 | ARM::isBitFieldInvertedMask(~Mask)) { |
| 13206 | // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask |
| 13207 | // where lsb(mask) == #shamt and masked bits of B are known zero. |
| 13208 | SDValue ShAmt = N00.getOperand(1); |
| 13209 | unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); |
| 13210 | unsigned LSB = countTrailingZeros(Mask); |
| 13211 | if (ShAmtC != LSB) |
| 13212 | return SDValue(); |
| 13213 | |
| 13214 | Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), |
| 13215 | DAG.getConstant(~Mask, DL, MVT::i32)); |
| 13216 | |
| 13217 | DCI.CombineTo(N, Res, false); |
    // Return value from the original node to inform the combiner that N is
    // now dead.
| 13220 | return SDValue(N, 0); |
| 13221 | } |
| 13222 | |
| 13223 | return SDValue(); |
| 13224 | } |
| 13225 | |
| 13226 | static bool isValidMVECond(unsigned CC, bool IsFloat) { |
| 13227 | switch (CC) { |
| 13228 | case ARMCC::EQ: |
| 13229 | case ARMCC::NE: |
| 13230 | case ARMCC::LE: |
| 13231 | case ARMCC::GT: |
| 13232 | case ARMCC::GE: |
| 13233 | case ARMCC::LT: |
| 13234 | return true; |
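  // HS and HI are unsigned conditions and so have no floating-point
  // equivalent.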
| 13235 | case ARMCC::HS: |
| 13236 | case ARMCC::HI: |
| 13237 | return !IsFloat; |
| 13238 | default: |
| 13239 | return false; |
| 13240 | }; |
| 13241 | } |
| 13242 | |
| 13243 | static ARMCC::CondCodes getVCMPCondCode(SDValue N) { |
| 13244 | if (N->getOpcode() == ARMISD::VCMP) |
| 13245 | return (ARMCC::CondCodes)N->getConstantOperandVal(2); |
| 13246 | else if (N->getOpcode() == ARMISD::VCMPZ) |
| 13247 | return (ARMCC::CondCodes)N->getConstantOperandVal(1); |
| 13248 | else |
    llvm_unreachable("Not a VCMP/VCMPZ!");
| 13250 | } |
| 13251 | |
| 13252 | static bool CanInvertMVEVCMP(SDValue N) { |
| 13253 | ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N)); |
| 13254 | return isValidMVECond(CC, N->getOperand(0).getValueType().isFloatingPoint()); |
| 13255 | } |
| 13256 | |
| 13257 | static SDValue PerformORCombine_i1(SDNode *N, |
| 13258 | TargetLowering::DAGCombinerInfo &DCI, |
| 13259 | const ARMSubtarget *Subtarget) { |
  // Try to invert "or A, B" -> "and ~A, ~B", as the "and" is easier to chain
  // together with predicates.
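  //
  // E.g. (or (vcmp gt, ...), (vcmp gt, ...)) becomes
  // (not (and (not (vcmp gt, ...)), (not (vcmp gt, ...)))), and the inner
  // NOTs are then expected to fold into inverted (vcmp le) compares
  // (illustrative).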
| 13262 | EVT VT = N->getValueType(0); |
| 13263 | SDLoc DL(N); |
| 13264 | SDValue N0 = N->getOperand(0); |
| 13265 | SDValue N1 = N->getOperand(1); |
| 13266 | |
| 13267 | auto IsFreelyInvertable = [&](SDValue V) { |
| 13268 | if (V->getOpcode() == ARMISD::VCMP || V->getOpcode() == ARMISD::VCMPZ) |
| 13269 | return CanInvertMVEVCMP(V); |
| 13270 | return false; |
| 13271 | }; |
| 13272 | |
  // At least one operand must be freely invertible.
| 13274 | if (!(IsFreelyInvertable(N0) || IsFreelyInvertable(N1))) |
| 13275 | return SDValue(); |
| 13276 | |
| 13277 | SDValue NewN0 = DCI.DAG.getLogicalNOT(DL, N0, VT); |
| 13278 | SDValue NewN1 = DCI.DAG.getLogicalNOT(DL, N1, VT); |
| 13279 | SDValue And = DCI.DAG.getNode(ISD::AND, DL, VT, NewN0, NewN1); |
| 13280 | return DCI.DAG.getLogicalNOT(DL, And, VT); |
| 13281 | } |
| 13282 | |
| 13283 | /// PerformORCombine - Target-specific dag combine xforms for ISD::OR |
| 13284 | static SDValue PerformORCombine(SDNode *N, |
| 13285 | TargetLowering::DAGCombinerInfo &DCI, |
| 13286 | const ARMSubtarget *Subtarget) { |
| 13287 | // Attempt to use immediate-form VORR |
| 13288 | BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); |
| 13289 | SDLoc dl(N); |
| 13290 | EVT VT = N->getValueType(0); |
| 13291 | SelectionDAG &DAG = DCI.DAG; |
| 13292 | |
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
| 13294 | return SDValue(); |
| 13295 | |
| 13296 | if (Subtarget->hasMVEIntegerOps() && |
| 13297 | (VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1)) |
| 13298 | return PerformORCombine_i1(N, DCI, Subtarget); |
| 13299 | |
| 13300 | APInt SplatBits, SplatUndef; |
| 13301 | unsigned SplatBitSize; |
| 13302 | bool HasAnyUndefs; |
| 13303 | if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) && |
| 13304 | BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { |
| 13305 | if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 || |
| 13306 | SplatBitSize == 64) { |
| 13307 | EVT VorrVT; |
| 13308 | SDValue Val = |
| 13309 | isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(), |
| 13310 | SplatBitSize, DAG, dl, VorrVT, VT, OtherModImm); |
| 13311 | if (Val.getNode()) { |
| 13312 | SDValue Input = |
| 13313 | DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); |
| 13314 | SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); |
| 13315 | return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); |
| 13316 | } |
| 13317 | } |
| 13318 | } |
| 13319 | |
| 13320 | if (!Subtarget->isThumb1Only()) { |
    // fold (or (select cc, 0, c), x) -> (select cc, x, (or x, c))
| 13322 | if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) |
| 13323 | return Result; |
| 13324 | if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget)) |
| 13325 | return Result; |
| 13326 | } |
| 13327 | |
| 13328 | SDValue N0 = N->getOperand(0); |
| 13329 | SDValue N1 = N->getOperand(1); |
| 13330 | |
| 13331 | // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. |
| 13332 | if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && |
| 13333 | DAG.getTargetLoweringInfo().isTypeLegal(VT)) { |
| 13334 | |
| 13335 | // The code below optimizes (or (and X, Y), Z). |
| 13336 | // The AND operand needs to have a single user to make these optimizations |
| 13337 | // profitable. |
| 13338 | if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) |
| 13339 | return SDValue(); |
| 13340 | |
| 13341 | APInt SplatUndef; |
| 13342 | unsigned SplatBitSize; |
| 13343 | bool HasAnyUndefs; |
| 13344 | |
| 13345 | APInt SplatBits0, SplatBits1; |
| 13346 | BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); |
| 13347 | BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); |
    // Ensure that the second operand of both ANDs is a constant splat.
| 13349 | if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, |
| 13350 | HasAnyUndefs) && !HasAnyUndefs) { |
| 13351 | if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, |
| 13352 | HasAnyUndefs) && !HasAnyUndefs) { |
        // Ensure that the bit widths of the constants are the same and that
        // the splat arguments are logical inverses as per the pattern we
        // are trying to simplify.
| 13356 | if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && |
| 13357 | SplatBits0 == ~SplatBits1) { |
| 13358 | // Canonicalize the vector type to make instruction selection |
| 13359 | // simpler. |
| 13360 | EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; |
| 13361 | SDValue Result = DAG.getNode(ARMISD::VBSP, dl, CanonicalVT, |
| 13362 | N0->getOperand(1), |
| 13363 | N0->getOperand(0), |
| 13364 | N1->getOperand(0)); |
| 13365 | return DAG.getNode(ISD::BITCAST, dl, VT, Result); |
| 13366 | } |
| 13367 | } |
| 13368 | } |
| 13369 | } |
| 13370 | |
| 13371 | // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when |
| 13372 | // reasonable. |
| 13373 | if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) { |
| 13374 | if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget)) |
| 13375 | return Res; |
| 13376 | } |
| 13377 | |
| 13378 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) |
| 13379 | return Result; |
| 13380 | |
| 13381 | return SDValue(); |
| 13382 | } |
| 13383 | |
| 13384 | static SDValue PerformXORCombine(SDNode *N, |
| 13385 | TargetLowering::DAGCombinerInfo &DCI, |
| 13386 | const ARMSubtarget *Subtarget) { |
| 13387 | EVT VT = N->getValueType(0); |
| 13388 | SelectionDAG &DAG = DCI.DAG; |
| 13389 | |
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
| 13391 | return SDValue(); |
| 13392 | |
| 13393 | if (!Subtarget->isThumb1Only()) { |
    // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor x, c))
| 13395 | if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) |
| 13396 | return Result; |
| 13397 | |
| 13398 | if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget)) |
| 13399 | return Result; |
| 13400 | } |
| 13401 | |
| 13402 | if (Subtarget->hasMVEIntegerOps()) { |
| 13403 | // fold (xor(vcmp/z, 1)) into a vcmp with the opposite condition. |
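    // E.g. (xor (vcmp eq, a, b), splat(1)) becomes (vcmp ne, a, b)
    // (illustrative).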
| 13404 | SDValue N0 = N->getOperand(0); |
| 13405 | SDValue N1 = N->getOperand(1); |
| 13406 | const TargetLowering *TLI = Subtarget->getTargetLowering(); |
| 13407 | if (TLI->isConstTrueVal(N1.getNode()) && |
| 13408 | (N0->getOpcode() == ARMISD::VCMP || N0->getOpcode() == ARMISD::VCMPZ)) { |
| 13409 | if (CanInvertMVEVCMP(N0)) { |
| 13410 | SDLoc DL(N0); |
| 13411 | ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N0)); |
| 13412 | |
| 13413 | SmallVector<SDValue, 4> Ops; |
| 13414 | Ops.push_back(N0->getOperand(0)); |
| 13415 | if (N0->getOpcode() == ARMISD::VCMP) |
| 13416 | Ops.push_back(N0->getOperand(1)); |
| 13417 | Ops.push_back(DCI.DAG.getConstant(CC, DL, MVT::i32)); |
| 13418 | return DCI.DAG.getNode(N0->getOpcode(), DL, N0->getValueType(0), Ops); |
| 13419 | } |
| 13420 | } |
| 13421 | } |
| 13422 | |
| 13423 | return SDValue(); |
| 13424 | } |
| 13425 | |
// ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and
// return it, and fill in FromMask and ToMask with (consecutive) bits in
// "from" to be extracted and their position in "to" (Rd).
| 13429 | static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) { |
| 13430 | assert(N->getOpcode() == ARMISD::BFI); |
| 13431 | |
| 13432 | SDValue From = N->getOperand(1); |
| 13433 | ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue(); |
  FromMask =
      APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation());
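  // E.g. (illustrative): a BFI whose inverted-mask operand is 0xffff00ff
  // yields ToMask = 0x0000ff00 and FromMask = 0x000000ff, i.e. bits [7:0] of
  // "from" are written to bits [15:8] of "to".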
| 13435 | |
  // If From came from a SRL #C, we can deduce that the bits really being
  // extracted start at bit #C in the base of the SRL.
| 13438 | if (From->getOpcode() == ISD::SRL && |
| 13439 | isa<ConstantSDNode>(From->getOperand(1))) { |
| 13440 | APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue(); |
    assert(Shift.getLimitedValue() < 32 && "Shift too large!");
| 13442 | FromMask <<= Shift.getLimitedValue(31); |
| 13443 | From = From->getOperand(0); |
| 13444 | } |
| 13445 | |
| 13446 | return From; |
| 13447 | } |
| 13448 | |
// If A and B each contain one contiguous set of bits, does A | B equal the
// bitwise concatenation A . B?
//
// Neither A nor B may be zero.
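//
// E.g. (illustrative) A = 0b1100 and B = 0b0011 concatenate properly
// (A | B == 0b1111), while A = 0b1100 and B = 0b0001 do not, as
// A | B == 0b1101 has a hole.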
| 13452 | static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) { |
| 13453 | unsigned LastActiveBitInA = A.countTrailingZeros(); |
| 13454 | unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1; |
| 13455 | return LastActiveBitInA - 1 == FirstActiveBitInB; |
| 13456 | } |
| 13457 | |
| 13458 | static SDValue FindBFIToCombineWith(SDNode *N) { |
  // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can
  // combine with, if one exists.
| 13461 | APInt ToMask, FromMask; |
| 13462 | SDValue From = ParseBFI(N, ToMask, FromMask); |
| 13463 | SDValue To = N->getOperand(0); |
| 13464 | |
| 13465 | // Now check for a compatible BFI to merge with. We can pass through BFIs that |
| 13466 | // aren't compatible, but not if they set the same bit in their destination as |
| 13467 | // we do (or that of any BFI we're going to combine with). |
| 13468 | SDValue V = To; |
| 13469 | APInt CombinedToMask = ToMask; |
| 13470 | while (V.getOpcode() == ARMISD::BFI) { |
| 13471 | APInt NewToMask, NewFromMask; |
| 13472 | SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask); |
| 13473 | if (NewFrom != From) { |
| 13474 | // This BFI has a different base. Keep going. |
| 13475 | CombinedToMask |= NewToMask; |
| 13476 | V = V.getOperand(0); |
| 13477 | continue; |
| 13478 | } |
| 13479 | |
| 13480 | // Do the written bits conflict with any we've seen so far? |
| 13481 | if ((NewToMask & CombinedToMask).getBoolValue()) |
| 13482 | // Conflicting bits - bail out because going further is unsafe. |
| 13483 | return SDValue(); |
| 13484 | |
| 13485 | // Are the new bits contiguous when combined with the old bits? |
| 13486 | if (BitsProperlyConcatenate(ToMask, NewToMask) && |
| 13487 | BitsProperlyConcatenate(FromMask, NewFromMask)) |
| 13488 | return V; |
| 13489 | if (BitsProperlyConcatenate(NewToMask, ToMask) && |
| 13490 | BitsProperlyConcatenate(NewFromMask, FromMask)) |
| 13491 | return V; |
| 13492 | |
| 13493 | // We've seen a write to some bits, so track it. |
| 13494 | CombinedToMask |= NewToMask; |
| 13495 | // Keep going... |
| 13496 | V = V.getOperand(0); |
| 13497 | } |
| 13498 | |
| 13499 | return SDValue(); |
| 13500 | } |
| 13501 | |
| 13502 | static SDValue PerformBFICombine(SDNode *N, |
| 13503 | TargetLowering::DAGCombinerInfo &DCI) { |
| 13504 | SDValue N1 = N->getOperand(1); |
| 13505 | if (N1.getOpcode() == ISD::AND) { |
| 13506 | // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff |
| 13507 | // the bits being cleared by the AND are not demanded by the BFI. |
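    // E.g. (bfi A, (and B, 0xff), ~0x0f) only demands the low 4 bits of B,
    // so the AND is dropped (illustrative).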
| 13508 | ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); |
| 13509 | if (!N11C) |
| 13510 | return SDValue(); |
| 13511 | unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); |
| 13512 | unsigned LSB = countTrailingZeros(~InvMask); |
| 13513 | unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB; |
    assert(Width <
               static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
           "undefined behavior");
| 13517 | unsigned Mask = (1u << Width) - 1; |
| 13518 | unsigned Mask2 = N11C->getZExtValue(); |
| 13519 | if ((Mask & (~Mask2)) == 0) |
| 13520 | return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0), |
| 13521 | N->getOperand(0), N1.getOperand(0), |
| 13522 | N->getOperand(2)); |
| 13523 | } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) { |
| 13524 | // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes. |
| 13525 | // Keep track of any consecutive bits set that all come from the same base |
| 13526 | // value. We can combine these together into a single BFI. |
| 13527 | SDValue CombineBFI = FindBFIToCombineWith(N); |
| 13528 | if (CombineBFI == SDValue()) |
| 13529 | return SDValue(); |
| 13530 | |
| 13531 | // We've found a BFI. |
| 13532 | APInt ToMask1, FromMask1; |
| 13533 | SDValue From1 = ParseBFI(N, ToMask1, FromMask1); |
| 13534 | |
| 13535 | APInt ToMask2, FromMask2; |
| 13536 | SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2); |
| 13537 | assert(From1 == From2); |
| 13538 | (void)From2; |
| 13539 | |
| 13540 | // First, unlink CombineBFI. |
| 13541 | DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0)); |
| 13542 | // Then create a new BFI, combining the two together. |
| 13543 | APInt NewFromMask = FromMask1 | FromMask2; |
| 13544 | APInt NewToMask = ToMask1 | ToMask2; |
| 13545 | |
| 13546 | EVT VT = N->getValueType(0); |
| 13547 | SDLoc dl(N); |
| 13548 | |
| 13549 | if (NewFromMask[0] == 0) |
| 13550 | From1 = DCI.DAG.getNode( |
| 13551 | ISD::SRL, dl, VT, From1, |
| 13552 | DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT)); |
| 13553 | return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1, |
| 13554 | DCI.DAG.getConstant(~NewToMask, dl, VT)); |
| 13555 | } |
| 13556 | return SDValue(); |
| 13557 | } |
| 13558 | |
| 13559 | /// PerformVMOVRRDCombine - Target-specific dag combine xforms for |
| 13560 | /// ARMISD::VMOVRRD. |
| 13561 | static SDValue PerformVMOVRRDCombine(SDNode *N, |
| 13562 | TargetLowering::DAGCombinerInfo &DCI, |
| 13563 | const ARMSubtarget *Subtarget) { |
| 13564 | // vmovrrd(vmovdrr x, y) -> x,y |
| 13565 | SDValue InDouble = N->getOperand(0); |
| 13566 | if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64()) |
| 13567 | return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); |
| 13568 | |
| 13569 | // vmovrrd(load f64) -> (load i32), (load i32) |
| 13570 | SDNode *InNode = InDouble.getNode(); |
| 13571 | if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && |
| 13572 | InNode->getValueType(0) == MVT::f64 && |
| 13573 | InNode->getOperand(1).getOpcode() == ISD::FrameIndex && |
| 13574 | !cast<LoadSDNode>(InNode)->isVolatile()) { |
| 13575 | // TODO: Should this be done for non-FrameIndex operands? |
| 13576 | LoadSDNode *LD = cast<LoadSDNode>(InNode); |
| 13577 | |
| 13578 | SelectionDAG &DAG = DCI.DAG; |
| 13579 | SDLoc DL(LD); |
| 13580 | SDValue BasePtr = LD->getBasePtr(); |
| 13581 | SDValue NewLD1 = |
| 13582 | DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(), |
| 13583 | LD->getAlignment(), LD->getMemOperand()->getFlags()); |
| 13584 | |
| 13585 | SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, |
| 13586 | DAG.getConstant(4, DL, MVT::i32)); |
| 13587 | |
| 13588 | SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, LD->getChain(), OffsetPtr, |
| 13589 | LD->getPointerInfo().getWithOffset(4), |
| 13590 | std::min(4U, LD->getAlignment()), |
| 13591 | LD->getMemOperand()->getFlags()); |
| 13592 | |
| 13593 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); |
| 13594 | if (DCI.DAG.getDataLayout().isBigEndian()) |
      std::swap(NewLD1, NewLD2);
| 13596 | SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); |
| 13597 | return Result; |
| 13598 | } |
| 13599 | |
| 13600 | return SDValue(); |
| 13601 | } |
| 13602 | |
| 13603 | /// PerformVMOVDRRCombine - Target-specific dag combine xforms for |
| 13604 | /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. |
| 13605 | static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { |
| 13606 | // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) |
| 13607 | SDValue Op0 = N->getOperand(0); |
| 13608 | SDValue Op1 = N->getOperand(1); |
| 13609 | if (Op0.getOpcode() == ISD::BITCAST) |
| 13610 | Op0 = Op0.getOperand(0); |
| 13611 | if (Op1.getOpcode() == ISD::BITCAST) |
| 13612 | Op1 = Op1.getOperand(0); |
| 13613 | if (Op0.getOpcode() == ARMISD::VMOVRRD && |
| 13614 | Op0.getNode() == Op1.getNode() && |
| 13615 | Op0.getResNo() == 0 && Op1.getResNo() == 1) |
| 13616 | return DAG.getNode(ISD::BITCAST, SDLoc(N), |
| 13617 | N->getValueType(0), Op0.getOperand(0)); |
| 13618 | return SDValue(); |
| 13619 | } |
| 13620 | |
static SDValue PerformVMOVhrCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) {
| 13622 | SDValue Op0 = N->getOperand(0); |
| 13623 | |
| 13624 | // VMOVhr (VMOVrh (X)) -> X |
| 13625 | if (Op0->getOpcode() == ARMISD::VMOVrh) |
| 13626 | return Op0->getOperand(0); |
| 13627 | |
| 13628 | // FullFP16: half values are passed in S-registers, and we don't |
| 13629 | // need any of the bitcast and moves: |
| 13630 | // |
| 13631 | // t2: f32,ch = CopyFromReg t0, Register:f32 %0 |
| 13632 | // t5: i32 = bitcast t2 |
| 13633 | // t18: f16 = ARMISD::VMOVhr t5 |
| 13634 | if (Op0->getOpcode() == ISD::BITCAST) { |
| 13635 | SDValue Copy = Op0->getOperand(0); |
| 13636 | if (Copy.getValueType() == MVT::f32 && |
| 13637 | Copy->getOpcode() == ISD::CopyFromReg) { |
| 13638 | SDValue Ops[] = {Copy->getOperand(0), Copy->getOperand(1)}; |
| 13639 | SDValue NewCopy = |
| 13640 | DCI.DAG.getNode(ISD::CopyFromReg, SDLoc(N), N->getValueType(0), Ops); |
| 13641 | return NewCopy; |
| 13642 | } |
| 13643 | } |
| 13644 | |
| 13645 | // fold (VMOVhr (load x)) -> (load (f16*)x) |
| 13646 | if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(Op0)) { |
| 13647 | if (LN0->hasOneUse() && LN0->isUnindexed() && |
| 13648 | LN0->getMemoryVT() == MVT::i16) { |
| 13649 | SDValue Load = |
| 13650 | DCI.DAG.getLoad(N->getValueType(0), SDLoc(N), LN0->getChain(), |
| 13651 | LN0->getBasePtr(), LN0->getMemOperand()); |
| 13652 | DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0)); |
| 13653 | DCI.DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Load.getValue(1)); |
| 13654 | return Load; |
| 13655 | } |
| 13656 | } |
| 13657 | |
| 13658 | // Only the bottom 16 bits of the source register are used. |
| 13659 | APInt DemandedMask = APInt::getLowBitsSet(32, 16); |
| 13660 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 13661 | if (TLI.SimplifyDemandedBits(Op0, DemandedMask, DCI)) |
| 13662 | return SDValue(N, 0); |
| 13663 | |
| 13664 | return SDValue(); |
| 13665 | } |
| 13666 | |
| 13667 | static SDValue PerformVMOVrhCombine(SDNode *N, |
| 13668 | TargetLowering::DAGCombinerInfo &DCI) { |
| 13669 | SDValue N0 = N->getOperand(0); |
| 13670 | EVT VT = N->getValueType(0); |
| 13671 | |
| 13672 | // fold (VMOVrh (fpconst x)) -> const x |
| 13673 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N0)) { |
| 13674 | APFloat V = C->getValueAPF(); |
| 13675 | return DCI.DAG.getConstant(V.bitcastToAPInt().getZExtValue(), SDLoc(N), VT); |
| 13676 | } |
| 13677 | |
| 13678 | // fold (VMOVrh (load x)) -> (zextload (i16*)x) |
| 13679 | if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse()) { |
| 13680 | LoadSDNode *LN0 = cast<LoadSDNode>(N0); |
| 13681 | |
| 13682 | SDValue Load = |
| 13683 | DCI.DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, LN0->getChain(), |
| 13684 | LN0->getBasePtr(), MVT::i16, LN0->getMemOperand()); |
| 13685 | DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0)); |
| 13686 | DCI.DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1)); |
| 13687 | return Load; |
| 13688 | } |
| 13689 | |
| 13690 | // Fold VMOVrh(extract(x, n)) -> vgetlaneu(x, n) |
| 13691 | if (N0->getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| 13692 | isa<ConstantSDNode>(N0->getOperand(1))) |
| 13693 | return DCI.DAG.getNode(ARMISD::VGETLANEu, SDLoc(N), VT, N0->getOperand(0), |
| 13694 | N0->getOperand(1)); |
| 13695 | |
| 13696 | return SDValue(); |
| 13697 | } |
| 13698 | |
| 13699 | /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node |
| 13700 | /// are normal, non-volatile loads. If so, it is profitable to bitcast an |
| 13701 | /// i64 vector to have f64 elements, since the value can then be loaded |
| 13702 | /// directly into a VFP register. |
| 13703 | static bool hasNormalLoadOperand(SDNode *N) { |
| 13704 | unsigned NumElts = N->getValueType(0).getVectorNumElements(); |
| 13705 | for (unsigned i = 0; i < NumElts; ++i) { |
| 13706 | SDNode *Elt = N->getOperand(i).getNode(); |
| 13707 | if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) |
| 13708 | return true; |
| 13709 | } |
| 13710 | return false; |
| 13711 | } |
| 13712 | |
| 13713 | /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for |
| 13714 | /// ISD::BUILD_VECTOR. |
| 13715 | static SDValue PerformBUILD_VECTORCombine(SDNode *N, |
| 13716 | TargetLowering::DAGCombinerInfo &DCI, |
| 13717 | const ARMSubtarget *Subtarget) { |
| 13718 | // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): |
| 13719 | // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value |
| 13720 | // into a pair of GPRs, which is fine when the value is used as a scalar, |
| 13721 | // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. |
| 13722 | SelectionDAG &DAG = DCI.DAG; |
| 13723 | if (N->getNumOperands() == 2) |
| 13724 | if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) |
| 13725 | return RV; |
| 13726 | |
| 13727 | // Load i64 elements as f64 values so that type legalization does not split |
| 13728 | // them up into i32 values. |
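  // For example (an illustrative sketch), a v2i64 BUILD_VECTOR whose
  // operands are i64 loads is rebuilt as a v2f64 BUILD_VECTOR of f64
  // bitcasts; the DAGCombiner then folds each bitcast(load) into an f64
  // load that goes straight into a D register.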
| 13729 | EVT VT = N->getValueType(0); |
| 13730 | if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) |
| 13731 | return SDValue(); |
| 13732 | SDLoc dl(N); |
| 13733 | SmallVector<SDValue, 8> Ops; |
| 13734 | unsigned NumElts = VT.getVectorNumElements(); |
| 13735 | for (unsigned i = 0; i < NumElts; ++i) { |
| 13736 | SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); |
| 13737 | Ops.push_back(V); |
| 13738 | // Make the DAGCombiner fold the bitcast. |
| 13739 | DCI.AddToWorklist(V.getNode()); |
| 13740 | } |
| 13741 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); |
| 13742 | SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops); |
| 13743 | return DAG.getNode(ISD::BITCAST, dl, VT, BV); |
| 13744 | } |
| 13745 | |
| 13746 | /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. |
| 13747 | static SDValue |
| 13748 | PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 13749 | // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. |
| 13750 | // At that time, we may have inserted bitcasts from integer to float. |
| 13751 | // If these bitcasts have survived DAGCombine, change the lowering of this |
| 13752 | // BUILD_VECTOR in something more vector friendly, i.e., that does not |
| 13753 | // force to use floating point types. |
| 13754 | |
| 13755 | // Make sure we can change the type of the vector. |
| 13756 | // This is possible iff: |
  // 1. The vector is only used in a bitcast to an integer type. I.e.,
| 13758 | // 1.1. Vector is used only once. |
| 13759 | // 1.2. Use is a bit convert to an integer type. |
| 13760 | // 2. The size of its operands are 32-bits (64-bits are not legal). |
| 13761 | EVT VT = N->getValueType(0); |
| 13762 | EVT EltVT = VT.getVectorElementType(); |
| 13763 | |
| 13764 | // Check 1.1. and 2. |
| 13765 | if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) |
| 13766 | return SDValue(); |
| 13767 | |
| 13768 | // By construction, the input type must be float. |
  assert(EltVT == MVT::f32 && "Unexpected type!");
| 13770 | |
| 13771 | // Check 1.2. |
| 13772 | SDNode *Use = *N->use_begin(); |
| 13773 | if (Use->getOpcode() != ISD::BITCAST || |
| 13774 | Use->getValueType(0).isFloatingPoint()) |
| 13775 | return SDValue(); |
| 13776 | |
| 13777 | // Check profitability. |
| 13778 | // Model is, if more than half of the relevant operands are bitcast from |
| 13779 | // i32, turn the build_vector into a sequence of insert_vector_elt. |
| 13780 | // Relevant operands are everything that is not statically |
| 13781 | // (i.e., at compile time) bitcasted. |
| 13782 | unsigned NumOfBitCastedElts = 0; |
| 13783 | unsigned NumElts = VT.getVectorNumElements(); |
| 13784 | unsigned NumOfRelevantElts = NumElts; |
| 13785 | for (unsigned Idx = 0; Idx < NumElts; ++Idx) { |
| 13786 | SDValue Elt = N->getOperand(Idx); |
| 13787 | if (Elt->getOpcode() == ISD::BITCAST) { |
| 13788 | // Assume only bit cast to i32 will go away. |
| 13789 | if (Elt->getOperand(0).getValueType() == MVT::i32) |
| 13790 | ++NumOfBitCastedElts; |
| 13791 | } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt)) |
| 13792 | // Constants are statically casted, thus do not count them as |
| 13793 | // relevant operands. |
| 13794 | --NumOfRelevantElts; |
| 13795 | } |
| 13796 | |
| 13797 | // Check if more than half of the elements require a non-free bitcast. |
| 13798 | if (NumOfBitCastedElts <= NumOfRelevantElts / 2) |
| 13799 | return SDValue(); |
| 13800 | |
| 13801 | SelectionDAG &DAG = DCI.DAG; |
| 13802 | // Create the new vector type. |
| 13803 | EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); |
| 13804 | // Check if the type is legal. |
| 13805 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 13806 | if (!TLI.isTypeLegal(VecVT)) |
| 13807 | return SDValue(); |
| 13808 | |
| 13809 | // Combine: |
| 13810 | // ARMISD::BUILD_VECTOR E1, E2, ..., EN. |
| 13811 | // => BITCAST INSERT_VECTOR_ELT |
| 13812 | // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), |
| 13813 | // (BITCAST EN), N. |
| 13814 | SDValue Vec = DAG.getUNDEF(VecVT); |
| 13815 | SDLoc dl(N); |
  for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
| 13817 | SDValue V = N->getOperand(Idx); |
| 13818 | if (V.isUndef()) |
| 13819 | continue; |
| 13820 | if (V.getOpcode() == ISD::BITCAST && |
| 13821 | V->getOperand(0).getValueType() == MVT::i32) |
| 13822 | // Fold obvious case. |
| 13823 | V = V.getOperand(0); |
| 13824 | else { |
| 13825 | V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V); |
| 13826 | // Make the DAGCombiner fold the bitcasts. |
| 13827 | DCI.AddToWorklist(V.getNode()); |
| 13828 | } |
| 13829 | SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32); |
| 13830 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx); |
| 13831 | } |
| 13832 | Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec); |
| 13833 | // Make the DAGCombiner fold the bitcasts. |
| 13834 | DCI.AddToWorklist(Vec.getNode()); |
| 13835 | return Vec; |
| 13836 | } |
| 13837 | |
| 13838 | static SDValue |
| 13839 | PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 13840 | EVT VT = N->getValueType(0); |
| 13841 | SDValue Op = N->getOperand(0); |
| 13842 | SDLoc dl(N); |
| 13843 | |
| 13844 | // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x) |
| 13845 | if (Op->getOpcode() == ARMISD::PREDICATE_CAST) { |
| 13846 | // If the valuetypes are the same, we can remove the cast entirely. |
| 13847 | if (Op->getOperand(0).getValueType() == VT) |
| 13848 | return Op->getOperand(0); |
| 13849 | return DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, Op->getOperand(0)); |
| 13850 | } |
| 13851 | |
| 13852 | // Turn pred_cast(xor x, -1) into xor(pred_cast x, -1), in order to produce |
| 13853 | // more VPNOT which might get folded as else predicates. |
| 13854 | if (Op.getValueType() == MVT::i32 && isBitwiseNot(Op)) { |
| 13855 | SDValue X = |
| 13856 | DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, Op->getOperand(0)); |
| 13857 | SDValue C = DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, |
| 13858 | DCI.DAG.getConstant(65535, dl, MVT::i32)); |
| 13859 | return DCI.DAG.getNode(ISD::XOR, dl, VT, X, C); |
| 13860 | } |
| 13861 | |
| 13862 | // Only the bottom 16 bits of the source register are used. |
| 13863 | if (Op.getValueType() == MVT::i32) { |
| 13864 | APInt DemandedMask = APInt::getLowBitsSet(32, 16); |
| 13865 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 13866 | if (TLI.SimplifyDemandedBits(Op, DemandedMask, DCI)) |
| 13867 | return SDValue(N, 0); |
| 13868 | } |
| 13869 | return SDValue(); |
| 13870 | } |
| 13871 | |
| 13872 | static SDValue |
| 13873 | PerformVECTOR_REG_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, |
| 13874 | const ARMSubtarget *ST) { |
| 13875 | EVT VT = N->getValueType(0); |
| 13876 | SDValue Op = N->getOperand(0); |
| 13877 | SDLoc dl(N); |
| 13878 | |
| 13879 | // Under Little endian, a VECTOR_REG_CAST is equivalent to a BITCAST |
| 13880 | if (ST->isLittle()) |
| 13881 | return DCI.DAG.getNode(ISD::BITCAST, dl, VT, Op); |
| 13882 | |
| 13883 | // VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x) |
| 13884 | if (Op->getOpcode() == ARMISD::VECTOR_REG_CAST) { |
| 13885 | // If the valuetypes are the same, we can remove the cast entirely. |
| 13886 | if (Op->getOperand(0).getValueType() == VT) |
| 13887 | return Op->getOperand(0); |
| 13888 | return DCI.DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Op->getOperand(0)); |
| 13889 | } |
| 13890 | |
| 13891 | return SDValue(); |
| 13892 | } |
| 13893 | |
| 13894 | static SDValue PerformVCMPCombine(SDNode *N, |
| 13895 | TargetLowering::DAGCombinerInfo &DCI, |
| 13896 | const ARMSubtarget *Subtarget) { |
| 13897 | if (!Subtarget->hasMVEIntegerOps()) |
| 13898 | return SDValue(); |
| 13899 | |
| 13900 | EVT VT = N->getValueType(0); |
| 13901 | SDValue Op0 = N->getOperand(0); |
| 13902 | SDValue Op1 = N->getOperand(1); |
| 13903 | ARMCC::CondCodes Cond = |
| 13904 | (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); |
| 13905 | SDLoc dl(N); |
| 13906 | |
| 13907 | // vcmp X, 0, cc -> vcmpz X, cc |
| 13908 | if (isZeroVector(Op1)) |
| 13909 | return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op0, |
| 13910 | N->getOperand(2)); |
| 13911 | |
| 13912 | unsigned SwappedCond = getSwappedCondition(Cond); |
| 13913 | if (isValidMVECond(SwappedCond, VT.isFloatingPoint())) { |
| 13914 | // vcmp 0, X, cc -> vcmpz X, reversed(cc) |
| 13915 | if (isZeroVector(Op0)) |
| 13916 | return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op1, |
| 13917 | DCI.DAG.getConstant(SwappedCond, dl, MVT::i32)); |
| 13918 | // vcmp vdup(Y), X, cc -> vcmp X, vdup(Y), reversed(cc) |
| 13919 | if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP) |
| 13920 | return DCI.DAG.getNode(ARMISD::VCMP, dl, VT, Op1, Op0, |
| 13921 | DCI.DAG.getConstant(SwappedCond, dl, MVT::i32)); |
| 13922 | } |
| 13923 | |
| 13924 | return SDValue(); |
| 13925 | } |
| 13926 | |
| 13927 | /// PerformInsertEltCombine - Target-specific dag combine xforms for |
| 13928 | /// ISD::INSERT_VECTOR_ELT. |
| 13929 | static SDValue PerformInsertEltCombine(SDNode *N, |
| 13930 | TargetLowering::DAGCombinerInfo &DCI) { |
| 13931 | // Bitcast an i64 load inserted into a vector to f64. |
| 13932 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
| 13933 | EVT VT = N->getValueType(0); |
| 13934 | SDNode *Elt = N->getOperand(1).getNode(); |
| 13935 | if (VT.getVectorElementType() != MVT::i64 || |
| 13936 | !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) |
| 13937 | return SDValue(); |
| 13938 | |
| 13939 | SelectionDAG &DAG = DCI.DAG; |
| 13940 | SDLoc dl(N); |
| 13941 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, |
| 13942 | VT.getVectorNumElements()); |
| 13943 | SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); |
| 13944 | SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); |
| 13945 | // Make the DAGCombiner fold the bitcasts. |
| 13946 | DCI.AddToWorklist(Vec.getNode()); |
| 13947 | DCI.AddToWorklist(V.getNode()); |
| 13948 | SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, |
| 13949 | Vec, V, N->getOperand(2)); |
| 13950 | return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); |
| 13951 | } |
| 13952 | |
static SDValue PerformExtractEltCombine(SDNode *N,
                                        TargetLowering::DAGCombinerInfo &DCI) {
| 13955 | SDValue Op0 = N->getOperand(0); |
| 13956 | EVT VT = N->getValueType(0); |
| 13957 | SDLoc dl(N); |
| 13958 | |
| 13959 | // extract (vdup x) -> x |
| 13960 | if (Op0->getOpcode() == ARMISD::VDUP) { |
| 13961 | SDValue X = Op0->getOperand(0); |
| 13962 | if (VT == MVT::f16 && X.getValueType() == MVT::i32) |
| 13963 | return DCI.DAG.getNode(ARMISD::VMOVhr, dl, VT, X); |
| 13964 | if (VT == MVT::i32 && X.getValueType() == MVT::f16) |
| 13965 | return DCI.DAG.getNode(ARMISD::VMOVrh, dl, VT, X); |
| 13966 | |
| 13967 | while (X.getValueType() != VT && X->getOpcode() == ISD::BITCAST) |
| 13968 | X = X->getOperand(0); |
| 13969 | if (X.getValueType() == VT) |
| 13970 | return X; |
| 13971 | } |
| 13972 | |
| 13973 | return SDValue(); |
| 13974 | } |
| 13975 | |
| 13976 | /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for |
| 13977 | /// ISD::VECTOR_SHUFFLE. |
| 13978 | static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { |
| 13979 | // The LLVM shufflevector instruction does not require the shuffle mask |
| 13980 | // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does |
| 13981 | // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the |
| 13982 | // operands do not match the mask length, they are extended by concatenating |
| 13983 | // them with undef vectors. That is probably the right thing for other |
| 13984 | // targets, but for NEON it is better to concatenate two double-register |
| 13985 | // size vector operands into a single quad-register size vector. Do that |
| 13986 | // transformation here: |
| 13987 | // shuffle(concat(v1, undef), concat(v2, undef)) -> |
| 13988 | // shuffle(concat(v1, v2), undef) |
| 13989 | SDValue Op0 = N->getOperand(0); |
| 13990 | SDValue Op1 = N->getOperand(1); |
| 13991 | if (Op0.getOpcode() != ISD::CONCAT_VECTORS || |
| 13992 | Op1.getOpcode() != ISD::CONCAT_VECTORS || |
| 13993 | Op0.getNumOperands() != 2 || |
| 13994 | Op1.getNumOperands() != 2) |
| 13995 | return SDValue(); |
| 13996 | SDValue Concat0Op1 = Op0.getOperand(1); |
| 13997 | SDValue Concat1Op1 = Op1.getOperand(1); |
| 13998 | if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef()) |
| 13999 | return SDValue(); |
| 14000 | // Skip the transformation if any of the types are illegal. |
| 14001 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 14002 | EVT VT = N->getValueType(0); |
| 14003 | if (!TLI.isTypeLegal(VT) || |
| 14004 | !TLI.isTypeLegal(Concat0Op1.getValueType()) || |
| 14005 | !TLI.isTypeLegal(Concat1Op1.getValueType())) |
| 14006 | return SDValue(); |
| 14007 | |
| 14008 | SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, |
| 14009 | Op0.getOperand(0), Op1.getOperand(0)); |
| 14010 | // Translate the shuffle mask. |
| 14011 | SmallVector<int, 16> NewMask; |
| 14012 | unsigned NumElts = VT.getVectorNumElements(); |
| 14013 | unsigned HalfElts = NumElts/2; |
| 14014 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); |
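  // For example (an illustrative sketch), with NumElts == 4: mask elements
  // 0-1 (the live half of the first concat) keep their index, while mask
  // element 4 (the first live lane of the second concat) becomes lane 2 of
  // the merged concat.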
| 14015 | for (unsigned n = 0; n < NumElts; ++n) { |
| 14016 | int MaskElt = SVN->getMaskElt(n); |
| 14017 | int NewElt = -1; |
| 14018 | if (MaskElt < (int)HalfElts) |
| 14019 | NewElt = MaskElt; |
| 14020 | else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) |
| 14021 | NewElt = HalfElts + MaskElt - NumElts; |
| 14022 | NewMask.push_back(NewElt); |
| 14023 | } |
| 14024 | return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat, |
| 14025 | DAG.getUNDEF(VT), NewMask); |
| 14026 | } |
| 14027 | |
| 14028 | /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, |
| 14029 | /// NEON load/store intrinsics, and generic vector load/stores, to merge |
| 14030 | /// base address updates. |
| 14031 | /// For generic load/stores, the memory type is assumed to be a vector. |
| 14032 | /// The caller is assumed to have checked legality. |
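///
/// For example (an illustrative sketch), a load whose address is separately
/// advanced by the size of the memory accessed:
///   vld1.32 {d16}, [r0]
///   add     r0, r0, #8
/// can instead use the post-incrementing form:
///   vld1.32 {d16}, [r0]!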
| 14033 | static SDValue CombineBaseUpdate(SDNode *N, |
| 14034 | TargetLowering::DAGCombinerInfo &DCI) { |
| 14035 | SelectionDAG &DAG = DCI.DAG; |
| 14036 | const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || |
| 14037 | N->getOpcode() == ISD::INTRINSIC_W_CHAIN); |
| 14038 | const bool isStore = N->getOpcode() == ISD::STORE; |
| 14039 | const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1); |
| 14040 | SDValue Addr = N->getOperand(AddrOpIdx); |
| 14041 | MemSDNode *MemN = cast<MemSDNode>(N); |
| 14042 | SDLoc dl(N); |
| 14043 | |
| 14044 | // Search for a use of the address operand that is an increment. |
| 14045 | for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), |
| 14046 | UE = Addr.getNode()->use_end(); UI != UE; ++UI) { |
| 14047 | SDNode *User = *UI; |
| 14048 | if (User->getOpcode() != ISD::ADD || |
| 14049 | UI.getUse().getResNo() != Addr.getResNo()) |
| 14050 | continue; |
| 14051 | |
| 14052 | // Check that the add is independent of the load/store. Otherwise, folding |
| 14053 | // it would create a cycle. We can avoid searching through Addr as it's a |
| 14054 | // predecessor to both. |
| 14055 | SmallPtrSet<const SDNode *, 32> Visited; |
| 14056 | SmallVector<const SDNode *, 16> Worklist; |
| 14057 | Visited.insert(Addr.getNode()); |
| 14058 | Worklist.push_back(N); |
| 14059 | Worklist.push_back(User); |
| 14060 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist) || |
| 14061 | SDNode::hasPredecessorHelper(User, Visited, Worklist)) |
| 14062 | continue; |
| 14063 | |
| 14064 | // Find the new opcode for the updating load/store. |
| 14065 | bool isLoadOp = true; |
| 14066 | bool isLaneOp = false; |
| 14067 | unsigned NewOpc = 0; |
| 14068 | unsigned NumVecs = 0; |
| 14069 | if (isIntrinsic) { |
| 14070 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); |
| 14071 | switch (IntNo) { |
| 14072 | default: llvm_unreachable("unexpected intrinsic for Neon base update" ); |
| 14073 | case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; |
| 14074 | NumVecs = 1; break; |
| 14075 | case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; |
| 14076 | NumVecs = 2; break; |
| 14077 | case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; |
| 14078 | NumVecs = 3; break; |
| 14079 | case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; |
| 14080 | NumVecs = 4; break; |
| 14081 | case Intrinsic::arm_neon_vld1x2: |
| 14082 | case Intrinsic::arm_neon_vld1x3: |
| 14083 | case Intrinsic::arm_neon_vld1x4: |
| 14084 | case Intrinsic::arm_neon_vld2dup: |
| 14085 | case Intrinsic::arm_neon_vld3dup: |
| 14086 | case Intrinsic::arm_neon_vld4dup: |
| 14087 | // TODO: Support updating VLD1x and VLDxDUP nodes. For now, we just skip |
| 14088 | // combining base updates for such intrinsics. |
| 14089 | continue; |
| 14090 | case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; |
| 14091 | NumVecs = 2; isLaneOp = true; break; |
| 14092 | case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; |
| 14093 | NumVecs = 3; isLaneOp = true; break; |
| 14094 | case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; |
| 14095 | NumVecs = 4; isLaneOp = true; break; |
| 14096 | case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; |
| 14097 | NumVecs = 1; isLoadOp = false; break; |
| 14098 | case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; |
| 14099 | NumVecs = 2; isLoadOp = false; break; |
| 14100 | case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; |
| 14101 | NumVecs = 3; isLoadOp = false; break; |
| 14102 | case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; |
| 14103 | NumVecs = 4; isLoadOp = false; break; |
| 14104 | case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; |
| 14105 | NumVecs = 2; isLoadOp = false; isLaneOp = true; break; |
| 14106 | case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; |
| 14107 | NumVecs = 3; isLoadOp = false; isLaneOp = true; break; |
| 14108 | case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; |
| 14109 | NumVecs = 4; isLoadOp = false; isLaneOp = true; break; |
| 14110 | } |
| 14111 | } else { |
| 14112 | isLaneOp = true; |
| 14113 | switch (N->getOpcode()) { |
| 14114 | default: llvm_unreachable("unexpected opcode for Neon base update" ); |
| 14115 | case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break; |
| 14116 | case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; |
| 14117 | case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; |
| 14118 | case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; |
| 14119 | case ISD::LOAD: NewOpc = ARMISD::VLD1_UPD; |
| 14120 | NumVecs = 1; isLaneOp = false; break; |
| 14121 | case ISD::STORE: NewOpc = ARMISD::VST1_UPD; |
| 14122 | NumVecs = 1; isLaneOp = false; isLoadOp = false; break; |
| 14123 | } |
| 14124 | } |
| 14125 | |
| 14126 | // Find the size of memory referenced by the load/store. |
| 14127 | EVT VecTy; |
| 14128 | if (isLoadOp) { |
| 14129 | VecTy = N->getValueType(0); |
| 14130 | } else if (isIntrinsic) { |
| 14131 | VecTy = N->getOperand(AddrOpIdx+1).getValueType(); |
| 14132 | } else { |
      assert(isStore && "Node has to be a load, a store, or an intrinsic!");
| 14134 | VecTy = N->getOperand(1).getValueType(); |
| 14135 | } |
| 14136 | |
| 14137 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; |
| 14138 | if (isLaneOp) |
| 14139 | NumBytes /= VecTy.getVectorNumElements(); |
| 14140 | |
| 14141 | // If the increment is a constant, it must match the memory ref size. |
| 14142 | SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); |
| 14143 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode()); |
| 14144 | if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) { |
| 14145 | // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two |
| 14146 | // separate instructions that make it harder to use a non-constant update. |
| 14147 | continue; |
| 14148 | } |
| 14149 | |
| 14150 | // OK, we found an ADD we can fold into the base update. |
| 14151 | // Now, create a _UPD node, taking care of not breaking alignment. |
| 14152 | |
| 14153 | EVT AlignedVecTy = VecTy; |
| 14154 | unsigned Alignment = MemN->getAlignment(); |
| 14155 | |
| 14156 | // If this is a less-than-standard-aligned load/store, change the type to |
| 14157 | // match the standard alignment. |
| 14158 | // The alignment is overlooked when selecting _UPD variants; and it's |
| 14159 | // easier to introduce bitcasts here than fix that. |
| 14160 | // There are 3 ways to get to this base-update combine: |
| 14161 | // - intrinsics: they are assumed to be properly aligned (to the standard |
| 14162 | // alignment of the memory type), so we don't need to do anything. |
| 14163 | // - ARMISD::VLDx nodes: they are only generated from the aforementioned |
| 14164 | // intrinsics, so, likewise, there's nothing to do. |
| 14165 | // - generic load/store instructions: the alignment is specified as an |
| 14166 | // explicit operand, rather than implicitly as the standard alignment |
    //   of the memory type (like the intrinsics). We need to change the
| 14168 | // memory type to match the explicit alignment. That way, we don't |
| 14169 | // generate non-standard-aligned ARMISD::VLDx nodes. |
| 14170 | if (isa<LSBaseSDNode>(N)) { |
| 14171 | if (Alignment == 0) |
| 14172 | Alignment = 1; |
| 14173 | if (Alignment < VecTy.getScalarSizeInBits() / 8) { |
| 14174 | MVT EltTy = MVT::getIntegerVT(Alignment * 8); |
        assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
        assert(!isLaneOp && "Unexpected generic load/store lane.");
| 14177 | unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8); |
| 14178 | AlignedVecTy = MVT::getVectorVT(EltTy, NumElts); |
| 14179 | } |
| 14180 | // Don't set an explicit alignment on regular load/stores that we want |
| 14181 | // to transform to VLD/VST 1_UPD nodes. |
| 14182 | // This matches the behavior of regular load/stores, which only get an |
| 14183 | // explicit alignment if the MMO alignment is larger than the standard |
| 14184 | // alignment of the memory type. |
| 14185 | // Intrinsics, however, always get an explicit alignment, set to the |
| 14186 | // alignment of the MMO. |
| 14187 | Alignment = 1; |
| 14188 | } |
| 14189 | |
| 14190 | // Create the new updating load/store node. |
| 14191 | // First, create an SDVTList for the new updating node's results. |
| 14192 | EVT Tys[6]; |
| 14193 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); |
| 14194 | unsigned n; |
| 14195 | for (n = 0; n < NumResultVecs; ++n) |
| 14196 | Tys[n] = AlignedVecTy; |
| 14197 | Tys[n++] = MVT::i32; |
| 14198 | Tys[n] = MVT::Other; |
| 14199 | SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2)); |
| 14200 | |
| 14201 | // Then, gather the new node's operands. |
| 14202 | SmallVector<SDValue, 8> Ops; |
| 14203 | Ops.push_back(N->getOperand(0)); // incoming chain |
| 14204 | Ops.push_back(N->getOperand(AddrOpIdx)); |
| 14205 | Ops.push_back(Inc); |
| 14206 | |
| 14207 | if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) { |
| 14208 | // Try to match the intrinsic's signature |
| 14209 | Ops.push_back(StN->getValue()); |
| 14210 | } else { |
| 14211 | // Loads (and of course intrinsics) match the intrinsics' signature, |
| 14212 | // so just add all but the alignment operand. |
| 14213 | for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i) |
| 14214 | Ops.push_back(N->getOperand(i)); |
| 14215 | } |
| 14216 | |
| 14217 | // For all node types, the alignment operand is always the last one. |
| 14218 | Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32)); |
| 14219 | |
| 14220 | // If this is a non-standard-aligned STORE, the penultimate operand is the |
| 14221 | // stored value. Bitcast it to the aligned type. |
| 14222 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) { |
| 14223 | SDValue &StVal = Ops[Ops.size()-2]; |
| 14224 | StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal); |
| 14225 | } |
| 14226 | |
| 14227 | EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy; |
| 14228 | SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT, |
| 14229 | MemN->getMemOperand()); |
| 14230 | |
| 14231 | // Update the uses. |
| 14232 | SmallVector<SDValue, 5> NewResults; |
| 14233 | for (unsigned i = 0; i < NumResultVecs; ++i) |
| 14234 | NewResults.push_back(SDValue(UpdN.getNode(), i)); |
| 14235 | |
    // If this is a non-standard-aligned LOAD, the first result is the loaded
| 14237 | // value. Bitcast it to the expected result type. |
| 14238 | if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) { |
| 14239 | SDValue &LdVal = NewResults[0]; |
| 14240 | LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal); |
| 14241 | } |
| 14242 | |
| 14243 | NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain |
| 14244 | DCI.CombineTo(N, NewResults); |
| 14245 | DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); |
| 14246 | |
| 14247 | break; |
| 14248 | } |
| 14249 | return SDValue(); |
| 14250 | } |
| 14251 | |
| 14252 | static SDValue PerformVLDCombine(SDNode *N, |
| 14253 | TargetLowering::DAGCombinerInfo &DCI) { |
| 14254 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 14255 | return SDValue(); |
| 14256 | |
| 14257 | return CombineBaseUpdate(N, DCI); |
| 14258 | } |
| 14259 | |
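// Much like CombineBaseUpdate above: look for an ADD of the address operand
// of an MVE VLD2/VLD4 or VST2/VST4 intrinsic and, when the increment equals
// the size of the memory accessed, fold it in to form the post-incrementing
// (_UPD) variant.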
| 14260 | static SDValue PerformMVEVLDCombine(SDNode *N, |
| 14261 | TargetLowering::DAGCombinerInfo &DCI) { |
| 14262 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 14263 | return SDValue(); |
| 14264 | |
| 14265 | SelectionDAG &DAG = DCI.DAG; |
| 14266 | SDValue Addr = N->getOperand(2); |
| 14267 | MemSDNode *MemN = cast<MemSDNode>(N); |
| 14268 | SDLoc dl(N); |
| 14269 | |
| 14270 | // For the stores, where there are multiple intrinsics we only actually want |
  // to post-inc the last of them.
| 14272 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); |
| 14273 | if (IntNo == Intrinsic::arm_mve_vst2q && |
| 14274 | cast<ConstantSDNode>(N->getOperand(5))->getZExtValue() != 1) |
| 14275 | return SDValue(); |
| 14276 | if (IntNo == Intrinsic::arm_mve_vst4q && |
| 14277 | cast<ConstantSDNode>(N->getOperand(7))->getZExtValue() != 3) |
| 14278 | return SDValue(); |
| 14279 | |
| 14280 | // Search for a use of the address operand that is an increment. |
| 14281 | for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), |
| 14282 | UE = Addr.getNode()->use_end(); |
| 14283 | UI != UE; ++UI) { |
| 14284 | SDNode *User = *UI; |
| 14285 | if (User->getOpcode() != ISD::ADD || |
| 14286 | UI.getUse().getResNo() != Addr.getResNo()) |
| 14287 | continue; |
| 14288 | |
| 14289 | // Check that the add is independent of the load/store. Otherwise, folding |
| 14290 | // it would create a cycle. We can avoid searching through Addr as it's a |
| 14291 | // predecessor to both. |
| 14292 | SmallPtrSet<const SDNode *, 32> Visited; |
| 14293 | SmallVector<const SDNode *, 16> Worklist; |
| 14294 | Visited.insert(Addr.getNode()); |
| 14295 | Worklist.push_back(N); |
| 14296 | Worklist.push_back(User); |
| 14297 | if (SDNode::hasPredecessorHelper(N, Visited, Worklist) || |
| 14298 | SDNode::hasPredecessorHelper(User, Visited, Worklist)) |
| 14299 | continue; |
| 14300 | |
| 14301 | // Find the new opcode for the updating load/store. |
| 14302 | bool isLoadOp = true; |
| 14303 | unsigned NewOpc = 0; |
| 14304 | unsigned NumVecs = 0; |
| 14305 | switch (IntNo) { |
| 14306 | default: |
| 14307 | llvm_unreachable("unexpected intrinsic for MVE VLDn combine" ); |
| 14308 | case Intrinsic::arm_mve_vld2q: |
| 14309 | NewOpc = ARMISD::VLD2_UPD; |
| 14310 | NumVecs = 2; |
| 14311 | break; |
| 14312 | case Intrinsic::arm_mve_vld4q: |
| 14313 | NewOpc = ARMISD::VLD4_UPD; |
| 14314 | NumVecs = 4; |
| 14315 | break; |
| 14316 | case Intrinsic::arm_mve_vst2q: |
| 14317 | NewOpc = ARMISD::VST2_UPD; |
| 14318 | NumVecs = 2; |
| 14319 | isLoadOp = false; |
| 14320 | break; |
| 14321 | case Intrinsic::arm_mve_vst4q: |
| 14322 | NewOpc = ARMISD::VST4_UPD; |
| 14323 | NumVecs = 4; |
| 14324 | isLoadOp = false; |
| 14325 | break; |
| 14326 | } |
| 14327 | |
| 14328 | // Find the size of memory referenced by the load/store. |
| 14329 | EVT VecTy; |
| 14330 | if (isLoadOp) { |
| 14331 | VecTy = N->getValueType(0); |
| 14332 | } else { |
| 14333 | VecTy = N->getOperand(3).getValueType(); |
| 14334 | } |
| 14335 | |
| 14336 | unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; |
| 14337 | |
| 14338 | // If the increment is a constant, it must match the memory ref size. |
| 14339 | SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); |
| 14340 | ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode()); |
| 14341 | if (!CInc || CInc->getZExtValue() != NumBytes) |
| 14342 | continue; |
| 14343 | |
| 14344 | // Create the new updating load/store node. |
| 14345 | // First, create an SDVTList for the new updating node's results. |
| 14346 | EVT Tys[6]; |
| 14347 | unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); |
| 14348 | unsigned n; |
| 14349 | for (n = 0; n < NumResultVecs; ++n) |
| 14350 | Tys[n] = VecTy; |
| 14351 | Tys[n++] = MVT::i32; |
| 14352 | Tys[n] = MVT::Other; |
| 14353 | SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2)); |
| 14354 | |
| 14355 | // Then, gather the new node's operands. |
| 14356 | SmallVector<SDValue, 8> Ops; |
| 14357 | Ops.push_back(N->getOperand(0)); // incoming chain |
| 14358 | Ops.push_back(N->getOperand(2)); // ptr |
| 14359 | Ops.push_back(Inc); |
| 14360 | |
| 14361 | for (unsigned i = 3; i < N->getNumOperands(); ++i) |
| 14362 | Ops.push_back(N->getOperand(i)); |
| 14363 | |
| 14364 | SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, VecTy, |
| 14365 | MemN->getMemOperand()); |
| 14366 | |
| 14367 | // Update the uses. |
| 14368 | SmallVector<SDValue, 5> NewResults; |
| 14369 | for (unsigned i = 0; i < NumResultVecs; ++i) |
| 14370 | NewResults.push_back(SDValue(UpdN.getNode(), i)); |
| 14371 | |
| 14372 | NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain |
| 14373 | DCI.CombineTo(N, NewResults); |
| 14374 | DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); |
| 14375 | |
| 14376 | break; |
| 14377 | } |
| 14378 | |
| 14379 | return SDValue(); |
| 14380 | } |
| 14381 | |
| 14382 | /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a |
| 14383 | /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic |
| 14384 | /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and |
| 14385 | /// return true. |
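///
/// For example (an illustrative sketch): if every use of the first two
/// results of
///   %vld = arm.neon.vld2lane(..., lane 1)
/// is a VDUPLANE of lane 1, the lane load plus the duplications collapse
/// into a single VLD2DUP that loads the element and splats it directly.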
| 14386 | static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { |
| 14387 | SelectionDAG &DAG = DCI.DAG; |
| 14388 | EVT VT = N->getValueType(0); |
| 14389 | // vldN-dup instructions only support 64-bit vectors for N > 1. |
| 14390 | if (!VT.is64BitVector()) |
| 14391 | return false; |
| 14392 | |
| 14393 | // Check if the VDUPLANE operand is a vldN-dup intrinsic. |
| 14394 | SDNode *VLD = N->getOperand(0).getNode(); |
| 14395 | if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) |
| 14396 | return false; |
| 14397 | unsigned NumVecs = 0; |
| 14398 | unsigned NewOpc = 0; |
| 14399 | unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); |
| 14400 | if (IntNo == Intrinsic::arm_neon_vld2lane) { |
| 14401 | NumVecs = 2; |
| 14402 | NewOpc = ARMISD::VLD2DUP; |
| 14403 | } else if (IntNo == Intrinsic::arm_neon_vld3lane) { |
| 14404 | NumVecs = 3; |
| 14405 | NewOpc = ARMISD::VLD3DUP; |
| 14406 | } else if (IntNo == Intrinsic::arm_neon_vld4lane) { |
| 14407 | NumVecs = 4; |
| 14408 | NewOpc = ARMISD::VLD4DUP; |
| 14409 | } else { |
| 14410 | return false; |
| 14411 | } |
| 14412 | |
| 14413 | // First check that all the vldN-lane uses are VDUPLANEs and that the lane |
| 14414 | // numbers match the load. |
| 14415 | unsigned VLDLaneNo = |
| 14416 | cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); |
| 14417 | for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); |
| 14418 | UI != UE; ++UI) { |
| 14419 | // Ignore uses of the chain result. |
| 14420 | if (UI.getUse().getResNo() == NumVecs) |
| 14421 | continue; |
| 14422 | SDNode *User = *UI; |
| 14423 | if (User->getOpcode() != ARMISD::VDUPLANE || |
| 14424 | VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) |
| 14425 | return false; |
| 14426 | } |
| 14427 | |
| 14428 | // Create the vldN-dup node. |
| 14429 | EVT Tys[5]; |
| 14430 | unsigned n; |
| 14431 | for (n = 0; n < NumVecs; ++n) |
| 14432 | Tys[n] = VT; |
| 14433 | Tys[n] = MVT::Other; |
| 14434 | SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1)); |
| 14435 | SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; |
| 14436 | MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); |
| 14437 | SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, |
| 14438 | Ops, VLDMemInt->getMemoryVT(), |
| 14439 | VLDMemInt->getMemOperand()); |
| 14440 | |
| 14441 | // Update the uses. |
| 14442 | for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); |
| 14443 | UI != UE; ++UI) { |
| 14444 | unsigned ResNo = UI.getUse().getResNo(); |
| 14445 | // Ignore uses of the chain result. |
| 14446 | if (ResNo == NumVecs) |
| 14447 | continue; |
| 14448 | SDNode *User = *UI; |
| 14449 | DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); |
| 14450 | } |
| 14451 | |
| 14452 | // Now the vldN-lane intrinsic is dead except for its chain result. |
| 14453 | // Update uses of the chain. |
| 14454 | std::vector<SDValue> VLDDupResults; |
| 14455 | for (unsigned n = 0; n < NumVecs; ++n) |
| 14456 | VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); |
| 14457 | VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); |
| 14458 | DCI.CombineTo(VLD, VLDDupResults); |
| 14459 | |
| 14460 | return true; |
| 14461 | } |
| 14462 | |
| 14463 | /// PerformVDUPLANECombine - Target-specific dag combine xforms for |
| 14464 | /// ARMISD::VDUPLANE. |
| 14465 | static SDValue PerformVDUPLANECombine(SDNode *N, |
| 14466 | TargetLowering::DAGCombinerInfo &DCI, |
| 14467 | const ARMSubtarget *Subtarget) { |
| 14468 | SDValue Op = N->getOperand(0); |
| 14469 | EVT VT = N->getValueType(0); |
| 14470 | |
| 14471 | // On MVE, we just convert the VDUPLANE to a VDUP with an extract. |
| 14472 | if (Subtarget->hasMVEIntegerOps()) { |
    EVT ExtractVT = VT.getVectorElementType();
| 14474 | // We need to ensure we are creating a legal type. |
| 14475 | if (!DCI.DAG.getTargetLoweringInfo().isTypeLegal(ExtractVT)) |
| 14476 | ExtractVT = MVT::i32; |
    SDValue Extract = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N),
                                      ExtractVT, N->getOperand(0),
                                      N->getOperand(1));
| 14479 | return DCI.DAG.getNode(ARMISD::VDUP, SDLoc(N), VT, Extract); |
| 14480 | } |
| 14481 | |
| 14482 | // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses |
| 14483 | // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. |
| 14484 | if (CombineVLDDUP(N, DCI)) |
| 14485 | return SDValue(N, 0); |
| 14486 | |
| 14487 | // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is |
| 14488 | // redundant. Ignore bit_converts for now; element sizes are checked below. |
| 14489 | while (Op.getOpcode() == ISD::BITCAST) |
| 14490 | Op = Op.getOperand(0); |
| 14491 | if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) |
| 14492 | return SDValue(); |
| 14493 | |
| 14494 | // Make sure the VMOV element size is not bigger than the VDUPLANE elements. |
| 14495 | unsigned EltSize = Op.getScalarValueSizeInBits(); |
| 14496 | // The canonical VMOV for a zero vector uses a 32-bit element size. |
| 14497 | unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
| 14498 | unsigned EltBits; |
| 14499 | if (ARM_AM::decodeVMOVModImm(Imm, EltBits) == 0) |
| 14500 | EltSize = 8; |
| 14501 | if (EltSize > VT.getScalarSizeInBits()) |
| 14502 | return SDValue(); |
| 14503 | |
| 14504 | return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); |
| 14505 | } |
| 14506 | |
| 14507 | /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP. |
| 14508 | static SDValue PerformVDUPCombine(SDNode *N, |
| 14509 | TargetLowering::DAGCombinerInfo &DCI, |
| 14510 | const ARMSubtarget *Subtarget) { |
| 14511 | SelectionDAG &DAG = DCI.DAG; |
| 14512 | SDValue Op = N->getOperand(0); |
| 14513 | SDLoc dl(N); |
| 14514 | |
| 14515 | if (Subtarget->hasMVEIntegerOps()) { |
| 14516 | // Convert VDUP f32 -> VDUP BITCAST i32 under MVE, as we know the value will |
| 14517 | // need to come from a GPR. |
| 14518 | if (Op.getValueType() == MVT::f32) |
| 14519 | return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), |
| 14520 | DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op)); |
| 14521 | else if (Op.getValueType() == MVT::f16) |
| 14522 | return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), |
| 14523 | DAG.getNode(ARMISD::VMOVrh, dl, MVT::i32, Op)); |
| 14524 | } |
| 14525 | |
| 14526 | if (!Subtarget->hasNEON()) |
| 14527 | return SDValue(); |
| 14528 | |
| 14529 | // Match VDUP(LOAD) -> VLD1DUP. |
| 14530 | // We match this pattern here rather than waiting for isel because the |
| 14531 | // transform is only legal for unindexed loads. |
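  // For example (an illustrative sketch):
  //   ldr     r1, [r0]
  //   vdup.32 d16, r1
  // becomes a single load-and-duplicate:
  //   vld1.32 {d16[]}, [r0]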
| 14532 | LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()); |
| 14533 | if (LD && Op.hasOneUse() && LD->isUnindexed() && |
| 14534 | LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) { |
| 14535 | SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1), |
| 14536 | DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) }; |
| 14537 | SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other); |
| 14538 | SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys, |
| 14539 | Ops, LD->getMemoryVT(), |
| 14540 | LD->getMemOperand()); |
| 14541 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1)); |
| 14542 | return VLDDup; |
| 14543 | } |
| 14544 | |
| 14545 | return SDValue(); |
| 14546 | } |
| 14547 | |
| 14548 | static SDValue PerformLOADCombine(SDNode *N, |
| 14549 | TargetLowering::DAGCombinerInfo &DCI) { |
| 14550 | EVT VT = N->getValueType(0); |
| 14551 | |
| 14552 | // If this is a legal vector load, try to combine it into a VLD1_UPD. |
| 14553 | if (ISD::isNormalLoad(N) && VT.isVector() && |
| 14554 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 14555 | return CombineBaseUpdate(N, DCI); |
| 14556 | |
| 14557 | return SDValue(); |
| 14558 | } |
| 14559 | |
| 14560 | // Optimize trunc store (of multiple scalars) to shuffle and store. First, |
| 14561 | // pack all of the elements in one place. Next, store to memory in fewer |
| 14562 | // chunks. |
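// For example (an illustrative sketch), a truncating store of v4i32 to
// v4i8 is bitcast to v16i8, shuffled so the four live bytes land in the
// low lanes, and then written with a single i32-sized store.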
| 14563 | static SDValue PerformTruncatingStoreCombine(StoreSDNode *St, |
| 14564 | SelectionDAG &DAG) { |
| 14565 | SDValue StVal = St->getValue(); |
| 14566 | EVT VT = StVal.getValueType(); |
| 14567 | if (!St->isTruncatingStore() || !VT.isVector()) |
| 14568 | return SDValue(); |
| 14569 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 14570 | EVT StVT = St->getMemoryVT(); |
| 14571 | unsigned NumElems = VT.getVectorNumElements(); |
  assert(StVT != VT && "Cannot truncate to the same type");
| 14573 | unsigned FromEltSz = VT.getScalarSizeInBits(); |
| 14574 | unsigned ToEltSz = StVT.getScalarSizeInBits(); |
| 14575 | |
| 14576 | // From, To sizes and ElemCount must be pow of two |
| 14577 | if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) |
| 14578 | return SDValue(); |
| 14579 | |
| 14580 | // We are going to use the original vector elt for storing. |
| 14581 | // Accumulated smaller vector elements must be a multiple of the store size. |
| 14582 | if (0 != (NumElems * FromEltSz) % ToEltSz) |
| 14583 | return SDValue(); |
| 14584 | |
| 14585 | unsigned SizeRatio = FromEltSz / ToEltSz; |
| 14586 | assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); |
| 14587 | |
| 14588 | // Create a type on which we perform the shuffle. |
| 14589 | EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(), |
| 14590 | NumElems * SizeRatio); |
| 14591 | assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); |
| 14592 | |
| 14593 | SDLoc DL(St); |
| 14594 | SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal); |
| 14595 | SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); |
| 14596 | for (unsigned i = 0; i < NumElems; ++i) |
| 14597 | ShuffleVec[i] = DAG.getDataLayout().isBigEndian() ? (i + 1) * SizeRatio - 1 |
| 14598 | : i * SizeRatio; |
| 14599 | |
| 14600 | // Can't shuffle using an illegal type. |
| 14601 | if (!TLI.isTypeLegal(WideVecVT)) |
| 14602 | return SDValue(); |
| 14603 | |
| 14604 | SDValue Shuff = DAG.getVectorShuffle( |
| 14605 | WideVecVT, DL, WideVec, DAG.getUNDEF(WideVec.getValueType()), ShuffleVec); |
| 14606 | // At this point all of the data is stored at the bottom of the |
| 14607 | // register. We now need to save it to mem. |
| 14608 | |
| 14609 | // Find the largest store unit |
| 14610 | MVT StoreType = MVT::i8; |
| 14611 | for (MVT Tp : MVT::integer_valuetypes()) { |
| 14612 | if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) |
| 14613 | StoreType = Tp; |
| 14614 | } |
| 14615 | // Didn't find a legal store type. |
| 14616 | if (!TLI.isTypeLegal(StoreType)) |
| 14617 | return SDValue(); |
| 14618 | |
| 14619 | // Bitcast the original vector into a vector of store-size units |
| 14620 | EVT StoreVecVT = |
| 14621 | EVT::getVectorVT(*DAG.getContext(), StoreType, |
| 14622 | VT.getSizeInBits() / EVT(StoreType).getSizeInBits()); |
| 14623 | assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); |
| 14624 | SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff); |
| 14625 | SmallVector<SDValue, 8> Chains; |
| 14626 | SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL, |
| 14627 | TLI.getPointerTy(DAG.getDataLayout())); |
| 14628 | SDValue BasePtr = St->getBasePtr(); |
| 14629 | |
| 14630 | // Perform one or more big stores into memory. |
| 14631 | unsigned E = (ToEltSz * NumElems) / StoreType.getSizeInBits(); |
| 14632 | for (unsigned I = 0; I < E; I++) { |
| 14633 | SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreType, |
| 14634 | ShuffWide, DAG.getIntPtrConstant(I, DL)); |
| 14635 | SDValue Ch = |
| 14636 | DAG.getStore(St->getChain(), DL, SubVec, BasePtr, St->getPointerInfo(), |
| 14637 | St->getAlignment(), St->getMemOperand()->getFlags()); |
| 14638 | BasePtr = |
| 14639 | DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, Increment); |
| 14640 | Chains.push_back(Ch); |
| 14641 | } |
| 14642 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); |
| 14643 | } |
| 14644 | |
// Try taking a single vector store from a truncate (which would otherwise turn
| 14646 | // into an expensive buildvector) and splitting it into a series of narrowing |
| 14647 | // stores. |
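// For example (an illustrative sketch under MVE), a store of
//   (v8i16 trunc (v8i32 x))
// is split into two truncating stores of v4i32 -> v4i16, each covering half
// of the vector, rather than scalarizing through a buildvector.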
| 14648 | static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St, |
| 14649 | SelectionDAG &DAG) { |
| 14650 | if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed()) |
| 14651 | return SDValue(); |
| 14652 | SDValue Trunc = St->getValue(); |
| 14653 | if (Trunc->getOpcode() != ISD::TRUNCATE && Trunc->getOpcode() != ISD::FP_ROUND) |
| 14654 | return SDValue(); |
| 14655 | EVT FromVT = Trunc->getOperand(0).getValueType(); |
| 14656 | EVT ToVT = Trunc.getValueType(); |
| 14657 | if (!ToVT.isVector()) |
| 14658 | return SDValue(); |
| 14659 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements()); |
| 14660 | EVT ToEltVT = ToVT.getVectorElementType(); |
| 14661 | EVT FromEltVT = FromVT.getVectorElementType(); |
| 14662 | |
| 14663 | unsigned NumElements = 0; |
| 14664 | if (FromEltVT == MVT::i32 && (ToEltVT == MVT::i16 || ToEltVT == MVT::i8)) |
| 14665 | NumElements = 4; |
| 14666 | if (FromEltVT == MVT::i16 && ToEltVT == MVT::i8) |
| 14667 | NumElements = 8; |
| 14668 | if (FromEltVT == MVT::f32 && ToEltVT == MVT::f16) |
| 14669 | NumElements = 4; |
| 14670 | if (NumElements == 0 || |
| 14671 | (FromEltVT != MVT::f32 && FromVT.getVectorNumElements() == NumElements) || |
| 14672 | FromVT.getVectorNumElements() % NumElements != 0) |
| 14673 | return SDValue(); |
| 14674 | |
| 14675 | // Test if the Trunc will be convertable to a VMOVN with a shuffle, and if so |
| 14676 | // use the VMOVN over splitting the store. We are looking for patterns of: |
| 14677 | // !rev: 0 N 1 N+1 2 N+2 ... |
| 14678 | // rev: N 0 N+1 1 N+2 2 ... |
| 14679 | // The shuffle may either be a single source (in which case N = NumElts/2) or |
| 14680 | // two inputs extended with concat to the same size (in which case N = |
| 14681 | // NumElts). |
| 14682 | auto isVMOVNShuffle = [&](ShuffleVectorSDNode *SVN, bool Rev) { |
| 14683 | ArrayRef<int> M = SVN->getMask(); |
| 14684 | unsigned NumElts = ToVT.getVectorNumElements(); |
| 14685 | if (SVN->getOperand(1).isUndef()) |
| 14686 | NumElts /= 2; |
| 14687 | |
| 14688 | unsigned Off0 = Rev ? NumElts : 0; |
| 14689 | unsigned Off1 = Rev ? 0 : NumElts; |
| 14690 | |
| 14691 | for (unsigned I = 0; I < NumElts; I += 2) { |
| 14692 | if (M[I] >= 0 && M[I] != (int)(Off0 + I / 2)) |
| 14693 | return false; |
| 14694 | if (M[I + 1] >= 0 && M[I + 1] != (int)(Off1 + I / 2)) |
| 14695 | return false; |
| 14696 | } |
| 14697 | |
| 14698 | return true; |
| 14699 | }; |
| 14700 | |
| 14701 | // It may be preferable to keep the store unsplit as the trunc may end up |
| 14702 | // being removed. Check that here. |
| 14703 | if (Trunc.getOperand(0).getOpcode() == ISD::SMIN) { |
| 14704 | if (SDValue U = PerformVQDMULHCombine(Trunc.getOperand(0).getNode(), DAG)) { |
| 14705 | DAG.ReplaceAllUsesWith(Trunc.getOperand(0), U); |
| 14706 | return SDValue(); |
| 14707 | } |
| 14708 | } |
| 14709 | if (auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Trunc.getOperand(0))) |
| 14710 | if (isVMOVNShuffle(Shuffle, false) || isVMOVNShuffle(Shuffle, true)) |
| 14711 | return SDValue(); |
| 14712 | |
| 14713 | LLVMContext &C = *DAG.getContext(); |
| 14714 | SDLoc DL(St); |
| 14715 | // Details about the old store |
| 14716 | SDValue Ch = St->getChain(); |
| 14717 | SDValue BasePtr = St->getBasePtr(); |
| 14718 | Align Alignment = St->getOriginalAlign(); |
| 14719 | MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags(); |
| 14720 | AAMDNodes AAInfo = St->getAAInfo(); |
| 14721 | |
| 14722 | // We split the store into slices of NumElements. fp16 trunc stores are vcvt |
| 14723 | // and then stored as truncating integer stores. |
| 14724 | EVT NewFromVT = EVT::getVectorVT(C, FromEltVT, NumElements); |
| 14725 | EVT NewToVT = EVT::getVectorVT( |
| 14726 | C, EVT::getIntegerVT(C, ToEltVT.getSizeInBits()), NumElements); |
| 14727 | |
| 14728 | SmallVector<SDValue, 4> Stores; |
| 14729 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
| 14730 | unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8; |
| 14731 | SDValue NewPtr = |
| 14732 | DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset)); |
| 14733 | |
    SDValue Extract =
        DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewFromVT, Trunc.getOperand(0),
                    DAG.getConstant(i * NumElements, DL, MVT::i32));
| 14737 | |
| 14738 | if (ToEltVT == MVT::f16) { |
| 14739 | SDValue FPTrunc = |
| 14740 | DAG.getNode(ARMISD::VCVTN, DL, MVT::v8f16, DAG.getUNDEF(MVT::v8f16), |
| 14741 | Extract, DAG.getConstant(0, DL, MVT::i32)); |
| 14742 | Extract = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v4i32, FPTrunc); |
| 14743 | } |
| 14744 | |
| 14745 | SDValue Store = DAG.getTruncStore( |
| 14746 | Ch, DL, Extract, NewPtr, St->getPointerInfo().getWithOffset(NewOffset), |
| 14747 | NewToVT, Alignment.value(), MMOFlags, AAInfo); |
| 14748 | Stores.push_back(Store); |
| 14749 | } |
| 14750 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores); |
| 14751 | } |
| 14752 | |
| 14753 | /// PerformSTORECombine - Target-specific dag combine xforms for |
| 14754 | /// ISD::STORE. |
| 14755 | static SDValue PerformSTORECombine(SDNode *N, |
| 14756 | TargetLowering::DAGCombinerInfo &DCI, |
| 14757 | const ARMSubtarget *Subtarget) { |
| 14758 | StoreSDNode *St = cast<StoreSDNode>(N); |
| 14759 | if (St->isVolatile()) |
| 14760 | return SDValue(); |
| 14761 | SDValue StVal = St->getValue(); |
| 14762 | EVT VT = StVal.getValueType(); |
| 14763 | |
| 14764 | if (Subtarget->hasNEON()) |
| 14765 | if (SDValue Store = PerformTruncatingStoreCombine(St, DCI.DAG)) |
| 14766 | return Store; |
| 14767 | |
| 14768 | if (Subtarget->hasMVEIntegerOps()) |
| 14769 | if (SDValue NewToken = PerformSplittingToNarrowingStores(St, DCI.DAG)) |
| 14770 | return NewToken; |
| 14771 | |
| 14772 | if (!ISD::isNormalStore(St)) |
| 14773 | return SDValue(); |
| 14774 | |
| 14775 | // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and |
| 14776 | // ARM stores of arguments in the same cache line. |
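  // For example (an illustrative sketch, little-endian), instead of
  //   vmov d16, r0, r1
  //   vstr d16, [r2]
  // store the two GPR halves directly:
  //   str  r0, [r2]
  //   str  r1, [r2, #4]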
| 14777 | if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && |
| 14778 | StVal.getNode()->hasOneUse()) { |
| 14779 | SelectionDAG &DAG = DCI.DAG; |
| 14780 | bool isBigEndian = DAG.getDataLayout().isBigEndian(); |
| 14781 | SDLoc DL(St); |
| 14782 | SDValue BasePtr = St->getBasePtr(); |
| 14783 | SDValue NewST1 = DAG.getStore( |
| 14784 | St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0), |
| 14785 | BasePtr, St->getPointerInfo(), St->getOriginalAlign(), |
| 14786 | St->getMemOperand()->getFlags()); |
| 14787 | |
| 14788 | SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, |
| 14789 | DAG.getConstant(4, DL, MVT::i32)); |
| 14790 | return DAG.getStore(NewST1.getValue(0), DL, |
| 14791 | StVal.getNode()->getOperand(isBigEndian ? 0 : 1), |
| 14792 | OffsetPtr, St->getPointerInfo().getWithOffset(4), |
| 14793 | St->getOriginalAlign(), |
| 14794 | St->getMemOperand()->getFlags()); |
| 14795 | } |
| 14796 | |
| 14797 | if (StVal.getValueType() == MVT::i64 && |
| 14798 | StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 14799 | |
| 14800 | // Bitcast an i64 store extracted from a vector to f64. |
| 14801 | // Otherwise, the i64 value will be legalized to a pair of i32 values. |
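    // As an illustrative sketch (schematic, not exact DAG syntax):
    //   store (extract_vector_elt x:v2i64, n):i64, ptr
    // becomes:
    //   store (bitcast (extract_vector_elt (bitcast x to v2f64), n) to i64), ptr
    // which keeps the extracted value in the FP/NEON register file.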
| 14802 | SelectionDAG &DAG = DCI.DAG; |
| 14803 | SDLoc dl(StVal); |
| 14804 | SDValue IntVec = StVal.getOperand(0); |
| 14805 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, |
| 14806 | IntVec.getValueType().getVectorNumElements()); |
| 14807 | SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); |
| 14808 | SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, |
| 14809 | Vec, StVal.getOperand(1)); |
| 14810 | dl = SDLoc(N); |
| 14811 | SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); |
| 14812 | // Make the DAGCombiner fold the bitcasts. |
| 14813 | DCI.AddToWorklist(Vec.getNode()); |
| 14814 | DCI.AddToWorklist(ExtElt.getNode()); |
| 14815 | DCI.AddToWorklist(V.getNode()); |
| 14816 | return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), |
| 14817 | St->getPointerInfo(), St->getAlignment(), |
| 14818 | St->getMemOperand()->getFlags(), St->getAAInfo()); |
| 14819 | } |
| 14820 | |
| 14821 | // If this is a legal vector store, try to combine it into a VST1_UPD. |
| 14822 | if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() && |
| 14823 | DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) |
| 14824 | return CombineBaseUpdate(N, DCI); |
| 14825 | |
| 14826 | return SDValue(); |
| 14827 | } |
| 14828 | |
| 14829 | /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) |
| 14830 | /// can replace combinations of VMUL and VCVT (floating-point to integer) |
| 14831 | /// when the VMUL has a constant operand that is a power of 2. |
| 14832 | /// |
| 14833 | /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): |
| 14834 | /// vmul.f32 d16, d17, d16 |
| 14835 | /// vcvt.s32.f32 d16, d16 |
| 14836 | /// becomes: |
| 14837 | /// vcvt.s32.f32 d16, d16, #3 |
| 14838 | static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, |
| 14839 | const ARMSubtarget *Subtarget) { |
| 14840 | if (!Subtarget->hasNEON()) |
| 14841 | return SDValue(); |
| 14842 | |
| 14843 | SDValue Op = N->getOperand(0); |
| 14844 | if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || |
| 14845 | Op.getOpcode() != ISD::FMUL) |
| 14846 | return SDValue(); |
| 14847 | |
| 14848 | SDValue ConstVec = Op->getOperand(1); |
| 14849 | if (!isa<BuildVectorSDNode>(ConstVec)) |
| 14850 | return SDValue(); |
| 14851 | |
| 14852 | MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); |
| 14853 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
| 14854 | MVT IntTy = N->getSimpleValueType(0).getVectorElementType(); |
| 14855 | uint32_t IntBits = IntTy.getSizeInBits(); |
| 14856 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
| 14857 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { |
| 14858 | // These instructions only exist converting from f32 to i32. We can handle |
| 14859 | // smaller integers by generating an extra truncate, but larger ones would |
| 14860 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since |
    // these instructions only support v2i32/v4i32 types.
| 14862 | return SDValue(); |
| 14863 | } |
| 14864 | |
| 14865 | BitVector UndefElements; |
| 14866 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); |
| 14867 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33); |
| 14868 | if (C == -1 || C == 0 || C > 32) |
| 14869 | return SDValue(); |
| 14870 | |
| 14871 | SDLoc dl(N); |
| 14872 | bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; |
| 14873 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : |
| 14874 | Intrinsic::arm_neon_vcvtfp2fxu; |
| 14875 | SDValue FixConv = DAG.getNode( |
| 14876 | ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, |
| 14877 | DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0), |
| 14878 | DAG.getConstant(C, dl, MVT::i32)); |
| 14879 | |
| 14880 | if (IntBits < FloatBits) |
| 14881 | FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv); |
| 14882 | |
| 14883 | return FixConv; |
| 14884 | } |
| 14885 | |
| 14886 | /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) |
| 14887 | /// can replace combinations of VCVT (integer to floating-point) and VDIV |
| 14888 | /// when the VDIV has a constant operand that is a power of 2. |
| 14889 | /// |
| 14890 | /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): |
| 14891 | /// vcvt.f32.s32 d16, d16 |
| 14892 | /// vdiv.f32 d16, d17, d16 |
| 14893 | /// becomes: |
| 14894 | /// vcvt.f32.s32 d16, d16, #3 |
| 14895 | static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG, |
| 14896 | const ARMSubtarget *Subtarget) { |
| 14897 | if (!Subtarget->hasNEON()) |
| 14898 | return SDValue(); |
| 14899 | |
| 14900 | SDValue Op = N->getOperand(0); |
| 14901 | unsigned OpOpcode = Op.getNode()->getOpcode(); |
| 14902 | if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() || |
| 14903 | (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) |
| 14904 | return SDValue(); |
| 14905 | |
| 14906 | SDValue ConstVec = N->getOperand(1); |
| 14907 | if (!isa<BuildVectorSDNode>(ConstVec)) |
| 14908 | return SDValue(); |
| 14909 | |
| 14910 | MVT FloatTy = N->getSimpleValueType(0).getVectorElementType(); |
| 14911 | uint32_t FloatBits = FloatTy.getSizeInBits(); |
| 14912 | MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType(); |
| 14913 | uint32_t IntBits = IntTy.getSizeInBits(); |
| 14914 | unsigned NumLanes = Op.getValueType().getVectorNumElements(); |
| 14915 | if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) { |
| 14916 | // These instructions only exist converting from i32 to f32. We can handle |
| 14917 | // smaller integers by generating an extra extend, but larger ones would |
| 14918 | // be lossy. We also can't handle anything other than 2 or 4 lanes, since |
    // these instructions only support v2i32/v4i32 types.
| 14920 | return SDValue(); |
| 14921 | } |
| 14922 | |
| 14923 | BitVector UndefElements; |
| 14924 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); |
| 14925 | int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33); |
| 14926 | if (C == -1 || C == 0 || C > 32) |
| 14927 | return SDValue(); |
| 14928 | |
| 14929 | SDLoc dl(N); |
| 14930 | bool isSigned = OpOpcode == ISD::SINT_TO_FP; |
| 14931 | SDValue ConvInput = Op.getOperand(0); |
| 14932 | if (IntBits < FloatBits) |
| 14933 | ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, |
| 14934 | dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, |
| 14935 | ConvInput); |
| 14936 | |
| 14937 | unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : |
| 14938 | Intrinsic::arm_neon_vcvtfxu2fp; |
| 14939 | return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, |
| 14940 | Op.getValueType(), |
| 14941 | DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), |
| 14942 | ConvInput, DAG.getConstant(C, dl, MVT::i32)); |
| 14943 | } |
| 14944 | |
| 14945 | static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG, |
| 14946 | const ARMSubtarget *ST) { |
| 14947 | if (!ST->hasMVEIntegerOps()) |
| 14948 | return SDValue(); |
| 14949 | |
| 14950 | assert(N->getOpcode() == ISD::VECREDUCE_ADD); |
| 14951 | EVT ResVT = N->getValueType(0); |
| 14952 | SDValue N0 = N->getOperand(0); |
| 14953 | SDLoc dl(N); |
| 14954 | |
| 14955 | // We are looking for something that will have illegal types if left alone, |
  // but that we can convert to a single instruction under MVE. For example
| 14957 | // vecreduce_add(sext(A, v8i32)) => VADDV.s16 A |
| 14958 | // or |
| 14959 | // vecreduce_add(mul(zext(A, v16i32), zext(B, v16i32))) => VMLADAV.u8 A, B |
| 14960 | |
| 14961 | // Cases: |
| 14962 | // VADDV u/s 8/16/32 |
| 14963 | // VMLAV u/s 8/16/32 |
| 14964 | // VADDLV u/s 32 |
| 14965 | // VMLALV u/s 16/32 |
| 14966 | |
| 14967 | // If the input vector is smaller than legal (v4i8/v4i16 for example) we can |
| 14968 | // extend it and use v4i32 instead. |
| 14969 | auto ExtendIfNeeded = [&](SDValue A, unsigned ExtendCode) { |
| 14970 | EVT AVT = A.getValueType(); |
| 14971 | if (!AVT.is128BitVector()) |
| 14972 | A = DAG.getNode(ExtendCode, dl, |
| 14973 | AVT.changeVectorElementType(MVT::getIntegerVT( |
| 14974 | 128 / AVT.getVectorMinNumElements())), |
| 14975 | A); |
| 14976 | return A; |
| 14977 | }; |
| 14978 | auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) { |
| 14979 | if (ResVT != RetTy || N0->getOpcode() != ExtendCode) |
| 14980 | return SDValue(); |
| 14981 | SDValue A = N0->getOperand(0); |
| 14982 | if (llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; })) |
| 14983 | return ExtendIfNeeded(A, ExtendCode); |
| 14984 | return SDValue(); |
| 14985 | }; |
| 14986 | auto IsPredVADDV = [&](MVT RetTy, unsigned ExtendCode, |
| 14987 | ArrayRef<MVT> ExtTypes, SDValue &Mask) { |
| 14988 | if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || |
| 14989 | !ISD::isBuildVectorAllZeros(N0->getOperand(2).getNode())) |
| 14990 | return SDValue(); |
| 14991 | Mask = N0->getOperand(0); |
| 14992 | SDValue Ext = N0->getOperand(1); |
| 14993 | if (Ext->getOpcode() != ExtendCode) |
| 14994 | return SDValue(); |
| 14995 | SDValue A = Ext->getOperand(0); |
| 14996 | if (llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; })) |
| 14997 | return ExtendIfNeeded(A, ExtendCode); |
| 14998 | return SDValue(); |
| 14999 | }; |
| 15000 | auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, |
| 15001 | SDValue &A, SDValue &B) { |
| 15002 | // For a vmla we are trying to match a larger pattern: |
| 15003 | // ExtA = sext/zext A |
| 15004 | // ExtB = sext/zext B |
| 15005 | // Mul = mul ExtA, ExtB |
| 15006 | // vecreduce.add Mul |
    // There might also be an extra extend between the mul and the addreduce, so
| 15008 | // long as the bitwidth is high enough to make them equivalent (for example |
| 15009 | // original v8i16 might be mul at v8i32 and the reduce happens at v8i64). |
| 15010 | if (ResVT != RetTy) |
| 15011 | return false; |
| 15012 | SDValue Mul = N0; |
| 15013 | if (Mul->getOpcode() == ExtendCode && |
| 15014 | Mul->getOperand(0).getScalarValueSizeInBits() * 2 >= |
| 15015 | ResVT.getScalarSizeInBits()) |
| 15016 | Mul = Mul->getOperand(0); |
| 15017 | if (Mul->getOpcode() != ISD::MUL) |
| 15018 | return false; |
| 15019 | SDValue ExtA = Mul->getOperand(0); |
| 15020 | SDValue ExtB = Mul->getOperand(1); |
| 15021 | if (ExtA->getOpcode() != ExtendCode && ExtB->getOpcode() != ExtendCode) |
| 15022 | return false; |
| 15023 | A = ExtA->getOperand(0); |
| 15024 | B = ExtB->getOperand(0); |
| 15025 | if (A.getValueType() == B.getValueType() && |
| 15026 | llvm::any_of(ExtTypes, |
| 15027 | [&A](MVT Ty) { return A.getValueType() == Ty; })) { |
| 15028 | A = ExtendIfNeeded(A, ExtendCode); |
| 15029 | B = ExtendIfNeeded(B, ExtendCode); |
| 15030 | return true; |
| 15031 | } |
| 15032 | return false; |
| 15033 | }; |
| 15034 | auto IsPredVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes, |
| 15035 | SDValue &A, SDValue &B, SDValue &Mask) { |
| 15036 | // Same as the pattern above with a select for the zero predicated lanes |
| 15037 | // ExtA = sext/zext A |
| 15038 | // ExtB = sext/zext B |
| 15039 | // Mul = mul ExtA, ExtB |
| 15040 | // N0 = select Mask, Mul, 0 |
| 15041 | // vecreduce.add N0 |
| 15042 | if (ResVT != RetTy || N0->getOpcode() != ISD::VSELECT || |
| 15043 | !ISD::isBuildVectorAllZeros(N0->getOperand(2).getNode())) |
| 15044 | return false; |
| 15045 | Mask = N0->getOperand(0); |
| 15046 | SDValue Mul = N0->getOperand(1); |
| 15047 | if (Mul->getOpcode() == ExtendCode && |
| 15048 | Mul->getOperand(0).getScalarValueSizeInBits() * 2 >= |
| 15049 | ResVT.getScalarSizeInBits()) |
| 15050 | Mul = Mul->getOperand(0); |
| 15051 | if (Mul->getOpcode() != ISD::MUL) |
| 15052 | return false; |
| 15053 | SDValue ExtA = Mul->getOperand(0); |
| 15054 | SDValue ExtB = Mul->getOperand(1); |
| 15055 | if (ExtA->getOpcode() != ExtendCode && ExtB->getOpcode() != ExtendCode) |
| 15056 | return false; |
| 15057 | A = ExtA->getOperand(0); |
| 15058 | B = ExtB->getOperand(0); |
| 15059 | if (A.getValueType() == B.getValueType() && |
| 15060 | llvm::any_of(ExtTypes, |
| 15061 | [&A](MVT Ty) { return A.getValueType() == Ty; })) { |
| 15062 | A = ExtendIfNeeded(A, ExtendCode); |
| 15063 | B = ExtendIfNeeded(B, ExtendCode); |
| 15064 | return true; |
| 15065 | } |
| 15066 | return false; |
| 15067 | }; |
| 15068 | auto Create64bitNode = [&](unsigned Opcode, ArrayRef<SDValue> Ops) { |
| 15069 | SDValue Node = DAG.getNode(Opcode, dl, {MVT::i32, MVT::i32}, Ops); |
| 15070 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Node, |
| 15071 | SDValue(Node.getNode(), 1)); |
| 15072 | }; |
| 15073 | |
| 15074 | if (SDValue A = IsVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8})) |
| 15075 | return DAG.getNode(ARMISD::VADDVs, dl, ResVT, A); |
| 15076 | if (SDValue A = IsVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8})) |
| 15077 | return DAG.getNode(ARMISD::VADDVu, dl, ResVT, A); |
| 15078 | if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND, |
| 15079 | {MVT::v4i8, MVT::v4i16, MVT::v4i32})) |
| 15080 | return Create64bitNode(ARMISD::VADDLVs, {A}); |
| 15081 | if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, |
| 15082 | {MVT::v4i8, MVT::v4i16, MVT::v4i32})) |
| 15083 | return Create64bitNode(ARMISD::VADDLVu, {A}); |
| 15084 | if (SDValue A = IsVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8})) |
| 15085 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, |
| 15086 | DAG.getNode(ARMISD::VADDVs, dl, MVT::i32, A)); |
| 15087 | if (SDValue A = IsVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8})) |
| 15088 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, |
| 15089 | DAG.getNode(ARMISD::VADDVu, dl, MVT::i32, A)); |
| 15090 | |
| 15091 | SDValue Mask; |
| 15092 | if (SDValue A = IsPredVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) |
| 15093 | return DAG.getNode(ARMISD::VADDVps, dl, ResVT, A, Mask); |
| 15094 | if (SDValue A = IsPredVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) |
| 15095 | return DAG.getNode(ARMISD::VADDVpu, dl, ResVT, A, Mask); |
| 15096 | if (SDValue A = IsPredVADDV(MVT::i64, ISD::SIGN_EXTEND, |
| 15097 | {MVT::v4i8, MVT::v4i16, MVT::v4i32}, Mask)) |
| 15098 | return Create64bitNode(ARMISD::VADDLVps, {A, Mask}); |
| 15099 | if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND, |
| 15100 | {MVT::v4i8, MVT::v4i16, MVT::v4i32}, Mask)) |
| 15101 | return Create64bitNode(ARMISD::VADDLVpu, {A, Mask}); |
| 15102 | if (SDValue A = IsPredVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, Mask)) |
| 15103 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, |
| 15104 | DAG.getNode(ARMISD::VADDVps, dl, MVT::i32, A, Mask)); |
| 15105 | if (SDValue A = IsPredVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, Mask)) |
| 15106 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, |
| 15107 | DAG.getNode(ARMISD::VADDVpu, dl, MVT::i32, A, Mask)); |
| 15108 | |
| 15109 | SDValue A, B; |
| 15110 | if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) |
| 15111 | return DAG.getNode(ARMISD::VMLAVs, dl, ResVT, A, B); |
| 15112 | if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) |
| 15113 | return DAG.getNode(ARMISD::VMLAVu, dl, ResVT, A, B); |
| 15114 | if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, |
| 15115 | {MVT::v8i8, MVT::v8i16, MVT::v4i8, MVT::v4i16, MVT::v4i32}, A, B)) |
| 15116 | return Create64bitNode(ARMISD::VMLALVs, {A, B}); |
| 15117 | if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, |
| 15118 | {MVT::v8i8, MVT::v8i16, MVT::v4i8, MVT::v4i16, MVT::v4i32}, A, B)) |
| 15119 | return Create64bitNode(ARMISD::VMLALVu, {A, B}); |
| 15120 | if (IsVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B)) |
| 15121 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, |
| 15122 | DAG.getNode(ARMISD::VMLAVs, dl, MVT::i32, A, B)); |
| 15123 | if (IsVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B)) |
| 15124 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, |
| 15125 | DAG.getNode(ARMISD::VMLAVu, dl, MVT::i32, A, B)); |
| 15126 | |
| 15127 | if (IsPredVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, Mask)) |
| 15128 | return DAG.getNode(ARMISD::VMLAVps, dl, ResVT, A, B, Mask); |
| 15129 | if (IsPredVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, Mask)) |
| 15130 | return DAG.getNode(ARMISD::VMLAVpu, dl, ResVT, A, B, Mask); |
| 15131 | if (IsPredVMLAV(MVT::i64, ISD::SIGN_EXTEND, |
| 15132 | {MVT::v8i8, MVT::v8i16, MVT::v4i8, MVT::v4i16, MVT::v4i32}, A, |
| 15133 | B, Mask)) |
| 15134 | return Create64bitNode(ARMISD::VMLALVps, {A, B, Mask}); |
| 15135 | if (IsPredVMLAV(MVT::i64, ISD::ZERO_EXTEND, |
| 15136 | {MVT::v8i8, MVT::v8i16, MVT::v4i8, MVT::v4i16, MVT::v4i32}, A, |
| 15137 | B, Mask)) |
| 15138 | return Create64bitNode(ARMISD::VMLALVpu, {A, B, Mask}); |
| 15139 | if (IsPredVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B, Mask)) |
| 15140 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, |
| 15141 | DAG.getNode(ARMISD::VMLAVps, dl, MVT::i32, A, B, Mask)); |
| 15142 | if (IsPredVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B, Mask)) |
| 15143 | return DAG.getNode(ISD::TRUNCATE, dl, ResVT, |
| 15144 | DAG.getNode(ARMISD::VMLAVpu, dl, MVT::i32, A, B, Mask)); |
| 15145 | |
  // Some complications. We can get a case where the two inputs of the mul are
  // the same, in which case the output sext will have been helpfully converted
  // to a zext. Turn it back.
| 15149 | SDValue Op = N0; |
| 15150 | if (Op->getOpcode() == ISD::VSELECT) |
| 15151 | Op = Op->getOperand(1); |
| 15152 | if (Op->getOpcode() == ISD::ZERO_EXTEND && |
| 15153 | Op->getOperand(0)->getOpcode() == ISD::MUL) { |
| 15154 | SDValue Mul = Op->getOperand(0); |
| 15155 | if (Mul->getOperand(0) == Mul->getOperand(1) && |
| 15156 | Mul->getOperand(0)->getOpcode() == ISD::SIGN_EXTEND) { |
| 15157 | SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, N0->getValueType(0), Mul); |
| 15158 | if (Op != N0) |
| 15159 | Ext = DAG.getNode(ISD::VSELECT, dl, N0->getValueType(0), |
| 15160 | N0->getOperand(0), Ext, N0->getOperand(2)); |
| 15161 | return DAG.getNode(ISD::VECREDUCE_ADD, dl, ResVT, Ext); |
| 15162 | } |
| 15163 | } |
| 15164 | |
| 15165 | return SDValue(); |
| 15166 | } |
| 15167 | |
| 15168 | static SDValue PerformVMOVNCombine(SDNode *N, |
| 15169 | TargetLowering::DAGCombinerInfo &DCI) { |
| 15170 | SDValue Op0 = N->getOperand(0); |
| 15171 | SDValue Op1 = N->getOperand(1); |
| 15172 | unsigned IsTop = N->getConstantOperandVal(2); |
| 15173 | |
| 15174 | // VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b) |
| 15175 | // VMOVNb(c, VQMOVNb(a, b)) => VQMOVNb(c, b) |
| 15176 | if ((Op1->getOpcode() == ARMISD::VQMOVNs || |
| 15177 | Op1->getOpcode() == ARMISD::VQMOVNu) && |
| 15178 | Op1->getConstantOperandVal(2) == 0) |
| 15179 | return DCI.DAG.getNode(Op1->getOpcode(), SDLoc(Op1), N->getValueType(0), |
| 15180 | Op0, Op1->getOperand(1), N->getOperand(2)); |
| 15181 | |
| 15182 | // Only the bottom lanes from Qm (Op1) and either the top or bottom lanes from |
| 15183 | // Qd (Op0) are demanded from a VMOVN, depending on whether we are inserting |
| 15184 | // into the top or bottom lanes. |
| 15185 | unsigned NumElts = N->getValueType(0).getVectorNumElements(); |
| 15186 | APInt Op1DemandedElts = APInt::getSplat(NumElts, APInt::getLowBitsSet(2, 1)); |
| 15187 | APInt Op0DemandedElts = |
| 15188 | IsTop ? Op1DemandedElts |
| 15189 | : APInt::getSplat(NumElts, APInt::getHighBitsSet(2, 1)); |
| 15190 | |
| 15191 | APInt KnownUndef, KnownZero; |
| 15192 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 15193 | if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, KnownUndef, |
| 15194 | KnownZero, DCI)) |
| 15195 | return SDValue(N, 0); |
| 15196 | if (TLI.SimplifyDemandedVectorElts(Op1, Op1DemandedElts, KnownUndef, |
| 15197 | KnownZero, DCI)) |
| 15198 | return SDValue(N, 0); |
| 15199 | |
| 15200 | return SDValue(); |
| 15201 | } |
| 15202 | |
| 15203 | static SDValue PerformVQMOVNCombine(SDNode *N, |
| 15204 | TargetLowering::DAGCombinerInfo &DCI) { |
| 15205 | SDValue Op0 = N->getOperand(0); |
| 15206 | unsigned IsTop = N->getConstantOperandVal(2); |
| 15207 | |
| 15208 | unsigned NumElts = N->getValueType(0).getVectorNumElements(); |
| 15209 | APInt Op0DemandedElts = |
| 15210 | APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1) |
| 15211 | : APInt::getHighBitsSet(2, 1)); |
| 15212 | |
| 15213 | APInt KnownUndef, KnownZero; |
| 15214 | const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo(); |
| 15215 | if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, KnownUndef, |
| 15216 | KnownZero, DCI)) |
| 15217 | return SDValue(N, 0); |
| 15218 | return SDValue(); |
| 15219 | } |
| 15220 | |
| 15221 | static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) { |
| 15222 | SDLoc DL(N); |
| 15223 | SDValue Op0 = N->getOperand(0); |
| 15224 | SDValue Op1 = N->getOperand(1); |
| 15225 | |
  // Turn X << -C -> X >> C and vice versa. The negative shifts can come up from
| 15227 | // uses of the intrinsics. |
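  // As an illustrative sketch:
  //   ARMISD::LSLL lo, hi, -4  -->  ARMISD::LSRL lo, hi, 4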
| 15228 | if (auto C = dyn_cast<ConstantSDNode>(N->getOperand(2))) { |
| 15229 | int ShiftAmt = C->getSExtValue(); |
| 15230 | if (ShiftAmt == 0) { |
| 15231 | SDValue Merge = DAG.getMergeValues({Op0, Op1}, DL); |
| 15232 | DAG.ReplaceAllUsesWith(N, Merge.getNode()); |
| 15233 | return SDValue(); |
| 15234 | } |
| 15235 | |
| 15236 | if (ShiftAmt >= -32 && ShiftAmt < 0) { |
| 15237 | unsigned NewOpcode = |
| 15238 | N->getOpcode() == ARMISD::LSLL ? ARMISD::LSRL : ARMISD::LSLL; |
| 15239 | SDValue NewShift = DAG.getNode(NewOpcode, DL, N->getVTList(), Op0, Op1, |
| 15240 | DAG.getConstant(-ShiftAmt, DL, MVT::i32)); |
| 15241 | DAG.ReplaceAllUsesWith(N, NewShift.getNode()); |
| 15242 | return NewShift; |
| 15243 | } |
| 15244 | } |
| 15245 | |
| 15246 | return SDValue(); |
| 15247 | } |
| 15248 | |
| 15249 | /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. |
| 15250 | SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N, |
| 15251 | DAGCombinerInfo &DCI) const { |
| 15252 | SelectionDAG &DAG = DCI.DAG; |
| 15253 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); |
| 15254 | switch (IntNo) { |
| 15255 | default: |
| 15256 | // Don't do anything for most intrinsics. |
| 15257 | break; |
| 15258 | |
| 15259 | // Vector shifts: check for immediate versions and lower them. |
| 15260 | // Note: This is done during DAG combining instead of DAG legalizing because |
| 15261 | // the build_vectors for 64-bit vector element shift counts are generally |
| 15262 | // not legal, and it is hard to see their values after they get legalized to |
| 15263 | // loads from a constant pool. |
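  // As an illustrative sketch (schematic, not exact DAG syntax):
  //   int_arm_neon_vshifts x, (build_vector 3, 3, 3, 3)
  // becomes:
  //   ARMISD::VSHLIMM x, 3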
| 15264 | case Intrinsic::arm_neon_vshifts: |
| 15265 | case Intrinsic::arm_neon_vshiftu: |
| 15266 | case Intrinsic::arm_neon_vrshifts: |
| 15267 | case Intrinsic::arm_neon_vrshiftu: |
| 15268 | case Intrinsic::arm_neon_vrshiftn: |
| 15269 | case Intrinsic::arm_neon_vqshifts: |
| 15270 | case Intrinsic::arm_neon_vqshiftu: |
| 15271 | case Intrinsic::arm_neon_vqshiftsu: |
| 15272 | case Intrinsic::arm_neon_vqshiftns: |
| 15273 | case Intrinsic::arm_neon_vqshiftnu: |
| 15274 | case Intrinsic::arm_neon_vqshiftnsu: |
| 15275 | case Intrinsic::arm_neon_vqrshiftns: |
| 15276 | case Intrinsic::arm_neon_vqrshiftnu: |
| 15277 | case Intrinsic::arm_neon_vqrshiftnsu: { |
| 15278 | EVT VT = N->getOperand(1).getValueType(); |
| 15279 | int64_t Cnt; |
| 15280 | unsigned VShiftOpc = 0; |
| 15281 | |
| 15282 | switch (IntNo) { |
| 15283 | case Intrinsic::arm_neon_vshifts: |
| 15284 | case Intrinsic::arm_neon_vshiftu: |
| 15285 | if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { |
| 15286 | VShiftOpc = ARMISD::VSHLIMM; |
| 15287 | break; |
| 15288 | } |
| 15289 | if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { |
| 15290 | VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? ARMISD::VSHRsIMM |
| 15291 | : ARMISD::VSHRuIMM); |
| 15292 | break; |
| 15293 | } |
| 15294 | return SDValue(); |
| 15295 | |
| 15296 | case Intrinsic::arm_neon_vrshifts: |
| 15297 | case Intrinsic::arm_neon_vrshiftu: |
| 15298 | if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) |
| 15299 | break; |
| 15300 | return SDValue(); |
| 15301 | |
| 15302 | case Intrinsic::arm_neon_vqshifts: |
| 15303 | case Intrinsic::arm_neon_vqshiftu: |
| 15304 | if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) |
| 15305 | break; |
| 15306 | return SDValue(); |
| 15307 | |
| 15308 | case Intrinsic::arm_neon_vqshiftsu: |
| 15309 | if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) |
| 15310 | break; |
| 15311 | llvm_unreachable("invalid shift count for vqshlu intrinsic" ); |
| 15312 | |
| 15313 | case Intrinsic::arm_neon_vrshiftn: |
| 15314 | case Intrinsic::arm_neon_vqshiftns: |
| 15315 | case Intrinsic::arm_neon_vqshiftnu: |
| 15316 | case Intrinsic::arm_neon_vqshiftnsu: |
| 15317 | case Intrinsic::arm_neon_vqrshiftns: |
| 15318 | case Intrinsic::arm_neon_vqrshiftnu: |
| 15319 | case Intrinsic::arm_neon_vqrshiftnsu: |
| 15320 | // Narrowing shifts require an immediate right shift. |
| 15321 | if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) |
| 15322 | break; |
| 15323 | llvm_unreachable("invalid shift count for narrowing vector shift " |
| 15324 | "intrinsic" ); |
| 15325 | |
| 15326 | default: |
| 15327 | llvm_unreachable("unhandled vector shift" ); |
| 15328 | } |
| 15329 | |
| 15330 | switch (IntNo) { |
| 15331 | case Intrinsic::arm_neon_vshifts: |
| 15332 | case Intrinsic::arm_neon_vshiftu: |
| 15333 | // Opcode already set above. |
| 15334 | break; |
| 15335 | case Intrinsic::arm_neon_vrshifts: |
| 15336 | VShiftOpc = ARMISD::VRSHRsIMM; |
| 15337 | break; |
| 15338 | case Intrinsic::arm_neon_vrshiftu: |
| 15339 | VShiftOpc = ARMISD::VRSHRuIMM; |
| 15340 | break; |
| 15341 | case Intrinsic::arm_neon_vrshiftn: |
| 15342 | VShiftOpc = ARMISD::VRSHRNIMM; |
| 15343 | break; |
| 15344 | case Intrinsic::arm_neon_vqshifts: |
| 15345 | VShiftOpc = ARMISD::VQSHLsIMM; |
| 15346 | break; |
| 15347 | case Intrinsic::arm_neon_vqshiftu: |
| 15348 | VShiftOpc = ARMISD::VQSHLuIMM; |
| 15349 | break; |
| 15350 | case Intrinsic::arm_neon_vqshiftsu: |
| 15351 | VShiftOpc = ARMISD::VQSHLsuIMM; |
| 15352 | break; |
| 15353 | case Intrinsic::arm_neon_vqshiftns: |
| 15354 | VShiftOpc = ARMISD::VQSHRNsIMM; |
| 15355 | break; |
| 15356 | case Intrinsic::arm_neon_vqshiftnu: |
| 15357 | VShiftOpc = ARMISD::VQSHRNuIMM; |
| 15358 | break; |
| 15359 | case Intrinsic::arm_neon_vqshiftnsu: |
| 15360 | VShiftOpc = ARMISD::VQSHRNsuIMM; |
| 15361 | break; |
| 15362 | case Intrinsic::arm_neon_vqrshiftns: |
| 15363 | VShiftOpc = ARMISD::VQRSHRNsIMM; |
| 15364 | break; |
| 15365 | case Intrinsic::arm_neon_vqrshiftnu: |
| 15366 | VShiftOpc = ARMISD::VQRSHRNuIMM; |
| 15367 | break; |
| 15368 | case Intrinsic::arm_neon_vqrshiftnsu: |
| 15369 | VShiftOpc = ARMISD::VQRSHRNsuIMM; |
| 15370 | break; |
| 15371 | } |
| 15372 | |
| 15373 | SDLoc dl(N); |
| 15374 | return DAG.getNode(VShiftOpc, dl, N->getValueType(0), |
| 15375 | N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32)); |
| 15376 | } |
| 15377 | |
| 15378 | case Intrinsic::arm_neon_vshiftins: { |
| 15379 | EVT VT = N->getOperand(1).getValueType(); |
| 15380 | int64_t Cnt; |
| 15381 | unsigned VShiftOpc = 0; |
| 15382 | |
| 15383 | if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) |
| 15384 | VShiftOpc = ARMISD::VSLIIMM; |
| 15385 | else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) |
| 15386 | VShiftOpc = ARMISD::VSRIIMM; |
| 15387 | else { |
| 15388 | llvm_unreachable("invalid shift count for vsli/vsri intrinsic" ); |
| 15389 | } |
| 15390 | |
| 15391 | SDLoc dl(N); |
| 15392 | return DAG.getNode(VShiftOpc, dl, N->getValueType(0), |
| 15393 | N->getOperand(1), N->getOperand(2), |
| 15394 | DAG.getConstant(Cnt, dl, MVT::i32)); |
| 15395 | } |
| 15396 | |
| 15397 | case Intrinsic::arm_neon_vqrshifts: |
| 15398 | case Intrinsic::arm_neon_vqrshiftu: |
| 15399 | // No immediate versions of these to check for. |
| 15400 | break; |
| 15401 | |
| 15402 | case Intrinsic::arm_mve_vqdmlah: |
| 15403 | case Intrinsic::arm_mve_vqdmlash: |
| 15404 | case Intrinsic::arm_mve_vqrdmlah: |
| 15405 | case Intrinsic::arm_mve_vqrdmlash: |
| 15406 | case Intrinsic::arm_mve_vmla_n_predicated: |
| 15407 | case Intrinsic::arm_mve_vmlas_n_predicated: |
| 15408 | case Intrinsic::arm_mve_vqdmlah_predicated: |
| 15409 | case Intrinsic::arm_mve_vqdmlash_predicated: |
| 15410 | case Intrinsic::arm_mve_vqrdmlah_predicated: |
| 15411 | case Intrinsic::arm_mve_vqrdmlash_predicated: { |
| 15412 | // These intrinsics all take an i32 scalar operand which is narrowed to the |
| 15413 | // size of a single lane of the vector type they return. So we don't need |
| 15414 | // any bits of that operand above that point, which allows us to eliminate |
| 15415 | // uxth/sxth. |
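    // As an illustrative sketch, for a v8i16 result only the low 16 bits of
    // the scalar matter, so:
    //   int_arm_mve_vqdmlah a, b, (sign_extend_inreg s, i16)
    // can use s directly, dropping the sxth.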
| 15416 | unsigned BitWidth = N->getValueType(0).getScalarSizeInBits(); |
| 15417 | APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth); |
| 15418 | if (SimplifyDemandedBits(N->getOperand(3), DemandedMask, DCI)) |
| 15419 | return SDValue(); |
| 15420 | break; |
| 15421 | } |
| 15422 | |
| 15423 | case Intrinsic::arm_mve_minv: |
| 15424 | case Intrinsic::arm_mve_maxv: |
| 15425 | case Intrinsic::arm_mve_minav: |
| 15426 | case Intrinsic::arm_mve_maxav: |
| 15427 | case Intrinsic::arm_mve_minv_predicated: |
| 15428 | case Intrinsic::arm_mve_maxv_predicated: |
| 15429 | case Intrinsic::arm_mve_minav_predicated: |
| 15430 | case Intrinsic::arm_mve_maxav_predicated: { |
| 15431 | // These intrinsics all take an i32 scalar operand which is narrowed to the |
| 15432 | // size of a single lane of the vector type they take as the other input. |
| 15433 | unsigned BitWidth = N->getOperand(2)->getValueType(0).getScalarSizeInBits(); |
| 15434 | APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth); |
| 15435 | if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) |
| 15436 | return SDValue(); |
| 15437 | break; |
| 15438 | } |
| 15439 | |
| 15440 | case Intrinsic::arm_mve_addv: { |
| 15441 | // Turn this intrinsic straight into the appropriate ARMISD::VADDV node, |
    // which allows PerformADDVecReduce to turn it into VADDLV when possible.
| 15443 | bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); |
| 15444 | unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs; |
| 15445 | return DAG.getNode(Opc, SDLoc(N), N->getVTList(), N->getOperand(1)); |
| 15446 | } |
| 15447 | |
| 15448 | case Intrinsic::arm_mve_addlv: |
| 15449 | case Intrinsic::arm_mve_addlv_predicated: { |
| 15450 | // Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR |
| 15451 | // which recombines the two outputs into an i64 |
| 15452 | bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); |
| 15453 | unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ? |
| 15454 | (Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) : |
| 15455 | (Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps); |
| 15456 | |
| 15457 | SmallVector<SDValue, 4> Ops; |
| 15458 | for (unsigned i = 1, e = N->getNumOperands(); i < e; i++) |
| 15459 | if (i != 2) // skip the unsigned flag |
| 15460 | Ops.push_back(N->getOperand(i)); |
| 15461 | |
| 15462 | SDLoc dl(N); |
| 15463 | SDValue val = DAG.getNode(Opc, dl, {MVT::i32, MVT::i32}, Ops); |
| 15464 | return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, val.getValue(0), |
| 15465 | val.getValue(1)); |
| 15466 | } |
| 15467 | } |
| 15468 | |
| 15469 | return SDValue(); |
| 15470 | } |
| 15471 | |
| 15472 | /// PerformShiftCombine - Checks for immediate versions of vector shifts and |
| 15473 | /// lowers them. As with the vector shift intrinsics, this is done during DAG |
| 15474 | /// combining instead of DAG legalizing because the build_vectors for 64-bit |
| 15475 | /// vector element shift counts are generally not legal, and it is hard to see |
| 15476 | /// their values after they get legalized to loads from a constant pool. |
| 15477 | static SDValue PerformShiftCombine(SDNode *N, |
| 15478 | TargetLowering::DAGCombinerInfo &DCI, |
| 15479 | const ARMSubtarget *ST) { |
| 15480 | SelectionDAG &DAG = DCI.DAG; |
| 15481 | EVT VT = N->getValueType(0); |
| 15482 | if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) { |
| 15483 | // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high |
| 15484 | // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16. |
| 15485 | SDValue N1 = N->getOperand(1); |
| 15486 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { |
| 15487 | SDValue N0 = N->getOperand(0); |
| 15488 | if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP && |
| 15489 | DAG.MaskedValueIsZero(N0.getOperand(0), |
| 15490 | APInt::getHighBitsSet(32, 16))) |
| 15491 | return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1); |
| 15492 | } |
| 15493 | } |
| 15494 | |
| 15495 | if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 && |
| 15496 | N->getOperand(0)->getOpcode() == ISD::AND && |
| 15497 | N->getOperand(0)->hasOneUse()) { |
| 15498 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) |
| 15499 | return SDValue(); |
| 15500 | // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't |
| 15501 | // usually show up because instcombine prefers to canonicalize it to |
| 15502 | // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come |
| 15503 | // out of GEP lowering in some cases. |
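    // As a worked sketch, with AndMask = 0x3ff and ShiftAmt = 2:
    //   (shl (and x, 0x3ff), 2)
    // becomes:
    //   (srl (shl x, 22), 20)
    // avoiding materializing the mask constant in a register.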
| 15504 | SDValue N0 = N->getOperand(0); |
| 15505 | ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| 15506 | if (!ShiftAmtNode) |
| 15507 | return SDValue(); |
| 15508 | uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue()); |
| 15509 | ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(N0->getOperand(1)); |
| 15510 | if (!AndMaskNode) |
| 15511 | return SDValue(); |
| 15512 | uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue()); |
| 15513 | // Don't transform uxtb/uxth. |
| 15514 | if (AndMask == 255 || AndMask == 65535) |
| 15515 | return SDValue(); |
| 15516 | if (isMask_32(AndMask)) { |
| 15517 | uint32_t MaskedBits = countLeadingZeros(AndMask); |
| 15518 | if (MaskedBits > ShiftAmt) { |
| 15519 | SDLoc DL(N); |
| 15520 | SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0), |
| 15521 | DAG.getConstant(MaskedBits, DL, MVT::i32)); |
| 15522 | return DAG.getNode( |
| 15523 | ISD::SRL, DL, MVT::i32, SHL, |
| 15524 | DAG.getConstant(MaskedBits - ShiftAmt, DL, MVT::i32)); |
| 15525 | } |
| 15526 | } |
| 15527 | } |
| 15528 | |
| 15529 | // Nothing to be done for scalar shifts. |
| 15530 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 15531 | if (!VT.isVector() || !TLI.isTypeLegal(VT)) |
| 15532 | return SDValue(); |
| 15533 | if (ST->hasMVEIntegerOps() && VT == MVT::v2i64) |
| 15534 | return SDValue(); |
| 15535 | |
| 15536 | int64_t Cnt; |
| 15537 | |
| 15538 | switch (N->getOpcode()) { |
| 15539 | default: llvm_unreachable("unexpected shift opcode" ); |
| 15540 | |
| 15541 | case ISD::SHL: |
| 15542 | if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) { |
| 15543 | SDLoc dl(N); |
| 15544 | return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0), |
| 15545 | DAG.getConstant(Cnt, dl, MVT::i32)); |
| 15546 | } |
| 15547 | break; |
| 15548 | |
| 15549 | case ISD::SRA: |
| 15550 | case ISD::SRL: |
| 15551 | if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { |
| 15552 | unsigned VShiftOpc = |
| 15553 | (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM); |
| 15554 | SDLoc dl(N); |
| 15555 | return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), |
| 15556 | DAG.getConstant(Cnt, dl, MVT::i32)); |
| 15557 | } |
| 15558 | } |
| 15559 | return SDValue(); |
| 15560 | } |
| 15561 | |
// Look for a sign/zero/fp extend of a larger-than-legal load. This can be
// split into multiple extending loads, which are simpler to deal with than an
// arbitrary extend. For fp extends we use an integer extending load and a VCVTL
// to convert the type to an f32.
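// As an illustrative sketch: (zext (load ptr):v16i8 to v16i16) becomes two
// legal extending loads of v8i8, zextloaded to v8i16 from ptr and ptr + 8,
// concatenated back together with a CONCAT_VECTORS.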
| 15566 | static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) { |
| 15567 | SDValue N0 = N->getOperand(0); |
| 15568 | if (N0.getOpcode() != ISD::LOAD) |
| 15569 | return SDValue(); |
| 15570 | LoadSDNode *LD = cast<LoadSDNode>(N0.getNode()); |
| 15571 | if (!LD->isSimple() || !N0.hasOneUse() || LD->isIndexed() || |
| 15572 | LD->getExtensionType() != ISD::NON_EXTLOAD) |
| 15573 | return SDValue(); |
| 15574 | EVT FromVT = LD->getValueType(0); |
| 15575 | EVT ToVT = N->getValueType(0); |
| 15576 | if (!ToVT.isVector()) |
| 15577 | return SDValue(); |
| 15578 | assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements()); |
| 15579 | EVT ToEltVT = ToVT.getVectorElementType(); |
| 15580 | EVT FromEltVT = FromVT.getVectorElementType(); |
| 15581 | |
| 15582 | unsigned NumElements = 0; |
| 15583 | if (ToEltVT == MVT::i32 && (FromEltVT == MVT::i16 || FromEltVT == MVT::i8)) |
| 15584 | NumElements = 4; |
| 15585 | if (ToEltVT == MVT::i16 && FromEltVT == MVT::i8) |
| 15586 | NumElements = 8; |
| 15587 | if (ToEltVT == MVT::f32 && FromEltVT == MVT::f16) |
| 15588 | NumElements = 4; |
| 15589 | if (NumElements == 0 || |
| 15590 | (FromEltVT != MVT::f16 && FromVT.getVectorNumElements() == NumElements) || |
| 15591 | FromVT.getVectorNumElements() % NumElements != 0 || |
| 15592 | !isPowerOf2_32(NumElements)) |
| 15593 | return SDValue(); |
| 15594 | |
| 15595 | LLVMContext &C = *DAG.getContext(); |
| 15596 | SDLoc DL(LD); |
| 15597 | // Details about the old load |
| 15598 | SDValue Ch = LD->getChain(); |
| 15599 | SDValue BasePtr = LD->getBasePtr(); |
| 15600 | Align Alignment = LD->getOriginalAlign(); |
| 15601 | MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags(); |
| 15602 | AAMDNodes AAInfo = LD->getAAInfo(); |
| 15603 | |
| 15604 | ISD::LoadExtType NewExtType = |
| 15605 | N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD; |
| 15606 | SDValue Offset = DAG.getUNDEF(BasePtr.getValueType()); |
| 15607 | EVT NewFromVT = EVT::getVectorVT( |
| 15608 | C, EVT::getIntegerVT(C, FromEltVT.getScalarSizeInBits()), NumElements); |
| 15609 | EVT NewToVT = EVT::getVectorVT( |
| 15610 | C, EVT::getIntegerVT(C, ToEltVT.getScalarSizeInBits()), NumElements); |
| 15611 | |
| 15612 | SmallVector<SDValue, 4> Loads; |
| 15613 | SmallVector<SDValue, 4> Chains; |
| 15614 | for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) { |
| 15615 | unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8; |
| 15616 | SDValue NewPtr = |
| 15617 | DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset)); |
| 15618 | |
| 15619 | SDValue NewLoad = |
| 15620 | DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, NewPtr, Offset, |
| 15621 | LD->getPointerInfo().getWithOffset(NewOffset), NewFromVT, |
| 15622 | Alignment, MMOFlags, AAInfo); |
| 15623 | Loads.push_back(NewLoad); |
| 15624 | Chains.push_back(SDValue(NewLoad.getNode(), 1)); |
| 15625 | } |
| 15626 | |
  // The f16 loads need to be extended with VCVTLs into their floating-point
  // types.
| 15628 | if (FromEltVT == MVT::f16) { |
| 15629 | SmallVector<SDValue, 4> Extends; |
| 15630 | |
| 15631 | for (unsigned i = 0; i < Loads.size(); i++) { |
| 15632 | SDValue LoadBC = |
| 15633 | DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v8f16, Loads[i]); |
| 15634 | SDValue FPExt = DAG.getNode(ARMISD::VCVTL, DL, MVT::v4f32, LoadBC, |
| 15635 | DAG.getConstant(0, DL, MVT::i32)); |
| 15636 | Extends.push_back(FPExt); |
| 15637 | } |
| 15638 | |
| 15639 | Loads = Extends; |
| 15640 | } |
| 15641 | |
| 15642 | SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); |
| 15643 | DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain); |
| 15644 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, ToVT, Loads); |
| 15645 | } |
| 15646 | |
| 15647 | /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, |
| 15648 | /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. |
| 15649 | static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, |
| 15650 | const ARMSubtarget *ST) { |
| 15651 | SDValue N0 = N->getOperand(0); |
| 15652 | |
| 15653 | // Check for sign- and zero-extensions of vector extract operations of 8- and |
| 15654 | // 16-bit vector elements. NEON and MVE support these directly. They are |
| 15655 | // handled during DAG combining because type legalization will promote them |
| 15656 | // to 32-bit types and it is messy to recognize the operations after that. |
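  // As an illustrative sketch:
  //   sext (extract_vector_elt x:v8i16, 3) to i32
  // becomes:
  //   ARMISD::VGETLANEs x, 3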
| 15657 | if ((ST->hasNEON() || ST->hasMVEIntegerOps()) && |
| 15658 | N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { |
| 15659 | SDValue Vec = N0.getOperand(0); |
| 15660 | SDValue Lane = N0.getOperand(1); |
| 15661 | EVT VT = N->getValueType(0); |
| 15662 | EVT EltVT = N0.getValueType(); |
| 15663 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| 15664 | |
| 15665 | if (VT == MVT::i32 && |
| 15666 | (EltVT == MVT::i8 || EltVT == MVT::i16) && |
| 15667 | TLI.isTypeLegal(Vec.getValueType()) && |
| 15668 | isa<ConstantSDNode>(Lane)) { |
| 15669 | |
| 15670 | unsigned Opc = 0; |
| 15671 | switch (N->getOpcode()) { |
| 15672 | default: llvm_unreachable("unexpected opcode" ); |
| 15673 | case ISD::SIGN_EXTEND: |
| 15674 | Opc = ARMISD::VGETLANEs; |
| 15675 | break; |
| 15676 | case ISD::ZERO_EXTEND: |
| 15677 | case ISD::ANY_EXTEND: |
| 15678 | Opc = ARMISD::VGETLANEu; |
| 15679 | break; |
| 15680 | } |
| 15681 | return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane); |
| 15682 | } |
| 15683 | } |
| 15684 | |
| 15685 | if (ST->hasMVEIntegerOps()) |
| 15686 | if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) |
| 15687 | return NewLoad; |
| 15688 | |
| 15689 | return SDValue(); |
| 15690 | } |
| 15691 | |
| 15692 | static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG, |
| 15693 | const ARMSubtarget *ST) { |
| 15694 | if (ST->hasMVEFloatOps()) |
| 15695 | if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG)) |
| 15696 | return NewLoad; |
| 15697 | |
| 15698 | return SDValue(); |
| 15699 | } |
| 15700 | |
| 15701 | /// PerformMinMaxCombine - Target-specific DAG combining for creating truncating |
| 15702 | /// saturates. |
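/// As an illustrative sketch, for v4i32:
///   (smin (smax x, -32768), 32767)
/// becomes a VQMOVNs into the bottom lanes of a v8i16, sign extended back up
/// to v4i32.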
| 15703 | static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG, |
| 15704 | const ARMSubtarget *ST) { |
| 15705 | EVT VT = N->getValueType(0); |
| 15706 | SDValue N0 = N->getOperand(0); |
| 15707 | if (!ST->hasMVEIntegerOps()) |
| 15708 | return SDValue(); |
| 15709 | |
| 15710 | if (SDValue V = PerformVQDMULHCombine(N, DAG)) |
| 15711 | return V; |
| 15712 | |
| 15713 | if (VT != MVT::v4i32 && VT != MVT::v8i16) |
| 15714 | return SDValue(); |
| 15715 | |
| 15716 | auto IsSignedSaturate = [&](SDNode *Min, SDNode *Max) { |
| 15717 | // Check one is a smin and the other is a smax |
| 15718 | if (Min->getOpcode() != ISD::SMIN) |
| 15719 | std::swap(Min, Max); |
| 15720 | if (Min->getOpcode() != ISD::SMIN || Max->getOpcode() != ISD::SMAX) |
| 15721 | return false; |
| 15722 | |
| 15723 | APInt SaturateC; |
| 15724 | if (VT == MVT::v4i32) |
| 15725 | SaturateC = APInt(32, (1 << 15) - 1, true); |
| 15726 | else //if (VT == MVT::v8i16) |
| 15727 | SaturateC = APInt(16, (1 << 7) - 1, true); |
| 15728 | |
| 15729 | APInt MinC, MaxC; |
| 15730 | if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) || |
| 15731 | MinC != SaturateC) |
| 15732 | return false; |
| 15733 | if (!ISD::isConstantSplatVector(Max->getOperand(1).getNode(), MaxC) || |
| 15734 | MaxC != ~SaturateC) |
| 15735 | return false; |
| 15736 | return true; |
| 15737 | }; |
| 15738 | |
| 15739 | if (IsSignedSaturate(N, N0.getNode())) { |
| 15740 | SDLoc DL(N); |
| 15741 | MVT ExtVT, HalfVT; |
| 15742 | if (VT == MVT::v4i32) { |
| 15743 | HalfVT = MVT::v8i16; |
| 15744 | ExtVT = MVT::v4i16; |
| 15745 | } else { // if (VT == MVT::v8i16) |
| 15746 | HalfVT = MVT::v16i8; |
| 15747 | ExtVT = MVT::v8i8; |
| 15748 | } |
| 15749 | |
    // Create a VQMOVNB with undef top lanes, then sign extend it into the top
    // half. That extend will hopefully be removed if only the bottom bits are
    // demanded (through a truncating store, for example).
| 15753 | SDValue VQMOVN = |
| 15754 | DAG.getNode(ARMISD::VQMOVNs, DL, HalfVT, DAG.getUNDEF(HalfVT), |
| 15755 | N0->getOperand(0), DAG.getConstant(0, DL, MVT::i32)); |
| 15756 | SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN); |
| 15757 | return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Bitcast, |
| 15758 | DAG.getValueType(ExtVT)); |
| 15759 | } |
| 15760 | |
| 15761 | auto IsUnsignedSaturate = [&](SDNode *Min) { |
| 15762 | // For unsigned, we just need to check for <= 0xffff |
| 15763 | if (Min->getOpcode() != ISD::UMIN) |
| 15764 | return false; |
| 15765 | |
| 15766 | APInt SaturateC; |
| 15767 | if (VT == MVT::v4i32) |
| 15768 | SaturateC = APInt(32, (1 << 16) - 1, true); |
| 15769 | else //if (VT == MVT::v8i16) |
| 15770 | SaturateC = APInt(16, (1 << 8) - 1, true); |
| 15771 | |
| 15772 | APInt MinC; |
| 15773 | if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) || |
| 15774 | MinC != SaturateC) |
| 15775 | return false; |
| 15776 | return true; |
| 15777 | }; |
| 15778 | |
| 15779 | if (IsUnsignedSaturate(N)) { |
| 15780 | SDLoc DL(N); |
| 15781 | MVT HalfVT; |
| 15782 | unsigned ExtConst; |
| 15783 | if (VT == MVT::v4i32) { |
| 15784 | HalfVT = MVT::v8i16; |
| 15785 | ExtConst = 0x0000FFFF; |
| 15786 | } else { //if (VT == MVT::v8i16) |
| 15787 | HalfVT = MVT::v16i8; |
| 15788 | ExtConst = 0x00FF; |
| 15789 | } |
| 15790 | |
    // Create a VQMOVNB with undef top lanes, then zero extend into the top half
    // with an AND. That extend will hopefully be removed if only the bottom
    // bits are demanded (through a truncating store, for example).
| 15794 | SDValue VQMOVN = |
| 15795 | DAG.getNode(ARMISD::VQMOVNu, DL, HalfVT, DAG.getUNDEF(HalfVT), N0, |
| 15796 | DAG.getConstant(0, DL, MVT::i32)); |
| 15797 | SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN); |
| 15798 | return DAG.getNode(ISD::AND, DL, VT, Bitcast, |
| 15799 | DAG.getConstant(ExtConst, DL, VT)); |
| 15800 | } |
| 15801 | |
| 15802 | return SDValue(); |
| 15803 | } |
| 15804 | |
| 15805 | static const APInt *isPowerOf2Constant(SDValue V) { |
| 15806 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(V); |
| 15807 | if (!C) |
| 15808 | return nullptr; |
| 15809 | const APInt *CV = &C->getAPIntValue(); |
| 15810 | return CV->isPowerOf2() ? CV : nullptr; |
| 15811 | } |
| 15812 | |
| 15813 | SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const { |
| 15814 | // If we have a CMOV, OR and AND combination such as: |
| 15815 | // if (x & CN) |
| 15816 | // y |= CM; |
| 15817 | // |
| 15818 | // And: |
| 15819 | // * CN is a single bit; |
| 15820 | // * All bits covered by CM are known zero in y |
| 15821 | // |
| 15822 | // Then we can convert this into a sequence of BFI instructions. This will |
| 15823 | // always be a win if CM is a single bit, will always be no worse than the |
| 15824 | // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is |
| 15825 | // three bits (due to the extra IT instruction). |
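  // As an illustrative sketch, with CN = 1 << 2 and CM = 0b11000:
  //   if (x & 4)
  //     y |= 0x18;
  // can become (each BFI copying bit 0 of t into one set bit of CM):
  //   t = x >> 2;
  //   y = BFI(y, t, bit 3);
  //   y = BFI(y, t, bit 4);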
| 15826 | |
| 15827 | SDValue Op0 = CMOV->getOperand(0); |
| 15828 | SDValue Op1 = CMOV->getOperand(1); |
| 15829 | auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2)); |
| 15830 | auto CC = CCNode->getAPIntValue().getLimitedValue(); |
| 15831 | SDValue CmpZ = CMOV->getOperand(4); |
| 15832 | |
| 15833 | // The compare must be against zero. |
| 15834 | if (!isNullConstant(CmpZ->getOperand(1))) |
| 15835 | return SDValue(); |
| 15836 | |
| 15837 | assert(CmpZ->getOpcode() == ARMISD::CMPZ); |
| 15838 | SDValue And = CmpZ->getOperand(0); |
| 15839 | if (And->getOpcode() != ISD::AND) |
| 15840 | return SDValue(); |
| 15841 | const APInt *AndC = isPowerOf2Constant(And->getOperand(1)); |
| 15842 | if (!AndC) |
| 15843 | return SDValue(); |
| 15844 | SDValue X = And->getOperand(0); |
| 15845 | |
| 15846 | if (CC == ARMCC::EQ) { |
| 15847 | // We're performing an "equal to zero" compare. Swap the operands so we |
| 15848 | // canonicalize on a "not equal to zero" compare. |
| 15849 | std::swap(Op0, Op1); |
| 15850 | } else { |
    assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
| 15852 | } |
| 15853 | |
| 15854 | if (Op1->getOpcode() != ISD::OR) |
| 15855 | return SDValue(); |
| 15856 | |
| 15857 | ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1)); |
| 15858 | if (!OrC) |
| 15859 | return SDValue(); |
| 15860 | SDValue Y = Op1->getOperand(0); |
| 15861 | |
| 15862 | if (Op0 != Y) |
| 15863 | return SDValue(); |
| 15864 | |
| 15865 | // Now, is it profitable to continue? |
| 15866 | APInt OrCI = OrC->getAPIntValue(); |
| 15867 | unsigned Heuristic = Subtarget->isThumb() ? 3 : 2; |
| 15868 | if (OrCI.countPopulation() > Heuristic) |
| 15869 | return SDValue(); |
| 15870 | |
| 15871 | // Lastly, can we determine that the bits defined by OrCI |
| 15872 | // are zero in Y? |
| 15873 | KnownBits Known = DAG.computeKnownBits(Y); |
| 15874 | if ((OrCI & Known.Zero) != OrCI) |
| 15875 | return SDValue(); |
| 15876 | |
| 15877 | // OK, we can do the combine. |
| 15878 | SDValue V = Y; |
| 15879 | SDLoc dl(X); |
| 15880 | EVT VT = X.getValueType(); |
| 15881 | unsigned BitInX = AndC->logBase2(); |
| 15882 | |
| 15883 | if (BitInX != 0) { |
| 15884 | // We must shift X first. |
| 15885 | X = DAG.getNode(ISD::SRL, dl, VT, X, |
| 15886 | DAG.getConstant(BitInX, dl, VT)); |
| 15887 | } |
| 15888 | |
| 15889 | for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits(); |
| 15890 | BitInY < NumActiveBits; ++BitInY) { |
| 15891 | if (OrCI[BitInY] == 0) |
| 15892 | continue; |
| 15893 | APInt Mask(VT.getSizeInBits(), 0); |
| 15894 | Mask.setBit(BitInY); |
| 15895 | V = DAG.getNode(ARMISD::BFI, dl, VT, V, X, |
| 15896 | // Confusingly, the operand is an *inverted* mask. |
| 15897 | DAG.getConstant(~Mask, dl, VT)); |
| 15898 | } |
| 15899 | |
| 15900 | return V; |
| 15901 | } |
| 15902 | |
| 15903 | // Given N, the value controlling the conditional branch, search for the loop |
| 15904 | // intrinsic, returning it, along with how the value is used. We need to handle |
| 15905 | // patterns such as the following: |
| 15906 | // (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit) |
| 15907 | // (brcond (setcc (loop.decrement), 0, eq), exit) |
| 15908 | // (brcond (setcc (loop.decrement), 0, ne), header) |
| 15909 | static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm, |
| 15910 | bool &Negate) { |
| 15911 | switch (N->getOpcode()) { |
| 15912 | default: |
| 15913 | break; |
| 15914 | case ISD::XOR: { |
| 15915 | if (!isa<ConstantSDNode>(N.getOperand(1))) |
| 15916 | return SDValue(); |
| 15917 | if (!cast<ConstantSDNode>(N.getOperand(1))->isOne()) |
| 15918 | return SDValue(); |
| 15919 | Negate = !Negate; |
| 15920 | return SearchLoopIntrinsic(N.getOperand(0), CC, Imm, Negate); |
| 15921 | } |
| 15922 | case ISD::SETCC: { |
| 15923 | auto *Const = dyn_cast<ConstantSDNode>(N.getOperand(1)); |
| 15924 | if (!Const) |
| 15925 | return SDValue(); |
| 15926 | if (Const->isNullValue()) |
| 15927 | Imm = 0; |
| 15928 | else if (Const->isOne()) |
| 15929 | Imm = 1; |
| 15930 | else |
| 15931 | return SDValue(); |
| 15932 | CC = cast<CondCodeSDNode>(N.getOperand(2))->get(); |
| 15933 | return SearchLoopIntrinsic(N->getOperand(0), CC, Imm, Negate); |
| 15934 | } |
| 15935 | case ISD::INTRINSIC_W_CHAIN: { |
| 15936 | unsigned IntOp = cast<ConstantSDNode>(N.getOperand(1))->getZExtValue(); |
| 15937 | if (IntOp != Intrinsic::test_set_loop_iterations && |
| 15938 | IntOp != Intrinsic::loop_decrement_reg) |
| 15939 | return SDValue(); |
| 15940 | return N; |
| 15941 | } |
| 15942 | } |
| 15943 | return SDValue(); |
| 15944 | } |
| 15945 | |
| 15946 | static SDValue PerformHWLoopCombine(SDNode *N, |
| 15947 | TargetLowering::DAGCombinerInfo &DCI, |
| 15948 | const ARMSubtarget *ST) { |
| 15949 | |
  // The hwloop intrinsics that we're interested in are used for control-flow,
  // either for entering or exiting the loop:
  // - test.set.loop.iterations will test whether its operand is zero. If it
  //   is zero, the following branch should not enter the loop.
  // - loop.decrement.reg also tests whether its operand is zero. If it is
  //   zero, the following branch should not branch back to the beginning of
  //   the loop.
  // So here, we need to check how the brcond is using the result of each of
  // the intrinsics to ensure that we're branching to the right place at the
  // right time.
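  // As an illustrative sketch (schematic, not exact DAG syntax):
  //   brcond (setcc (int_test_set_loop_iterations n), 0, eq), exit
  // becomes:
  //   ARMISD::WLS chain, n, exit
  // with the fall-through entering the loop.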
| 15960 | |
| 15961 | ISD::CondCode CC; |
| 15962 | SDValue Cond; |
| 15963 | int Imm = 1; |
| 15964 | bool Negate = false; |
| 15965 | SDValue Chain = N->getOperand(0); |
| 15966 | SDValue Dest; |
| 15967 | |
| 15968 | if (N->getOpcode() == ISD::BRCOND) { |
| 15969 | CC = ISD::SETEQ; |
| 15970 | Cond = N->getOperand(1); |
| 15971 | Dest = N->getOperand(2); |
| 15972 | } else { |
    assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!");
| 15974 | CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); |
| 15975 | Cond = N->getOperand(2); |
| 15976 | Dest = N->getOperand(4); |
| 15977 | if (auto *Const = dyn_cast<ConstantSDNode>(N->getOperand(3))) { |
| 15978 | if (!Const->isOne() && !Const->isNullValue()) |
| 15979 | return SDValue(); |
| 15980 | Imm = Const->getZExtValue(); |
| 15981 | } else |
| 15982 | return SDValue(); |
| 15983 | } |
| 15984 | |
| 15985 | SDValue Int = SearchLoopIntrinsic(Cond, CC, Imm, Negate); |
| 15986 | if (!Int) |
| 15987 | return SDValue(); |
| 15988 | |
| 15989 | if (Negate) |
| 15990 | CC = ISD::getSetCCInverse(CC, /* Integer inverse */ MVT::i32); |
| 15991 | |
| 15992 | auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) { |
| 15993 | return (CC == ISD::SETEQ && Imm == 0) || |
| 15994 | (CC == ISD::SETNE && Imm == 1) || |
| 15995 | (CC == ISD::SETLT && Imm == 1) || |
| 15996 | (CC == ISD::SETULT && Imm == 1); |
| 15997 | }; |
| 15998 | |
| 15999 | auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) { |
| 16000 | return (CC == ISD::SETEQ && Imm == 1) || |
| 16001 | (CC == ISD::SETNE && Imm == 0) || |
| 16002 | (CC == ISD::SETGT && Imm == 0) || |
| 16003 | (CC == ISD::SETUGT && Imm == 0) || |
| 16004 | (CC == ISD::SETGE && Imm == 1) || |
| 16005 | (CC == ISD::SETUGE && Imm == 1); |
| 16006 | }; |
| 16007 | |
  assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) &&
         "unsupported condition");
| 16010 | |
| 16011 | SDLoc dl(Int); |
| 16012 | SelectionDAG &DAG = DCI.DAG; |
| 16013 | SDValue Elements = Int.getOperand(2); |
| 16014 | unsigned IntOp = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue(); |
  assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR) &&
         "expected single br user");
| 16017 | SDNode *Br = *N->use_begin(); |
| 16018 | SDValue OtherTarget = Br->getOperand(1); |
| 16019 | |
| 16020 | // Update the unconditional branch to branch to the given Dest. |
| 16021 | auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) { |
| 16022 | SDValue NewBrOps[] = { Br->getOperand(0), Dest }; |
| 16023 | SDValue NewBr = DAG.getNode(ISD::BR, SDLoc(Br), MVT::Other, NewBrOps); |
| 16024 | DAG.ReplaceAllUsesOfValueWith(SDValue(Br, 0), NewBr); |
| 16025 | }; |
| 16026 | |
| 16027 | if (IntOp == Intrinsic::test_set_loop_iterations) { |
| 16028 | SDValue Res; |
| 16029 | // We expect this 'instruction' to branch when the counter is zero. |
| 16030 | if (IsTrueIfZero(CC, Imm)) { |
| 16031 | SDValue Ops[] = { Chain, Elements, Dest }; |
| 16032 | Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops); |
| 16033 | } else { |
      // The logic is the reverse of what we need for WLS, so find the other
      // basic block target: the target of the following br.
| 16036 | UpdateUncondBr(Br, Dest, DAG); |
| 16037 | |
| 16038 | SDValue Ops[] = { Chain, Elements, OtherTarget }; |
| 16039 | Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops); |
| 16040 | } |
| 16041 | DAG.ReplaceAllUsesOfValueWith(Int.getValue(1), Int.getOperand(0)); |
| 16042 | return Res; |
| 16043 | } else { |
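    // Otherwise this is loop.decrement.reg: lower it to a LOOP_DEC feeding an
    // ARMISD::LE, so that e.g. (br_cc ne (int_loop_decrement_reg n, step), 0)
    // becomes (LE (LOOP_DEC n, step), loop-header).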
| 16044 | SDValue Size = DAG.getTargetConstant( |
| 16045 | cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl, MVT::i32); |
    SDValue Args[] = { Int.getOperand(0), Elements, Size };
| 16047 | SDValue LoopDec = DAG.getNode(ARMISD::LOOP_DEC, dl, |
| 16048 | DAG.getVTList(MVT::i32, MVT::Other), Args); |
| 16049 | DAG.ReplaceAllUsesWith(Int.getNode(), LoopDec.getNode()); |
| 16050 | |
| 16051 | // We expect this instruction to branch when the count is not zero. |
| 16052 | SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget; |
| 16053 | |
| 16054 | // Update the unconditional branch to target the loop preheader if we've |
| 16055 | // found the condition has been reversed. |
| 16056 | if (Target == OtherTarget) |
| 16057 | UpdateUncondBr(Br, Dest, DAG); |
| 16058 | |
| 16059 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, |
| 16060 | SDValue(LoopDec.getNode(), 1), Chain); |
| 16061 | |
| 16062 | SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target }; |
| 16063 | return DAG.getNode(ARMISD::LE, dl, MVT::Other, EndArgs); |
| 16064 | } |
| 16065 | return SDValue(); |
| 16066 | } |
| 16067 | |
| 16068 | /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND. |
| 16069 | SDValue |
| 16070 | ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const { |
| 16071 | SDValue Cmp = N->getOperand(4); |
| 16072 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
| 16073 | // Only looking at NE cases. |
| 16074 | return SDValue(); |
| 16075 | |
| 16076 | EVT VT = N->getValueType(0); |
| 16077 | SDLoc dl(N); |
| 16078 | SDValue LHS = Cmp.getOperand(0); |
| 16079 | SDValue RHS = Cmp.getOperand(1); |
| 16080 | SDValue Chain = N->getOperand(0); |
| 16081 | SDValue BB = N->getOperand(1); |
| 16082 | SDValue ARMcc = N->getOperand(2); |
| 16083 | ARMCC::CondCodes CC = |
| 16084 | (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); |
| 16085 | |
| 16086 | // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0)) |
| 16087 | // -> (brcond Chain BB CC CPSR Cmp) |
| 16088 | if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() && |
| 16089 | LHS->getOperand(0)->getOpcode() == ARMISD::CMOV && |
| 16090 | LHS->getOperand(0)->hasOneUse()) { |
| 16091 | auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0)); |
| 16092 | auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1)); |
| 16093 | auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); |
| 16094 | auto *RHSC = dyn_cast<ConstantSDNode>(RHS); |
| 16095 | if ((LHS00C && LHS00C->getZExtValue() == 0) && |
| 16096 | (LHS01C && LHS01C->getZExtValue() == 1) && |
| 16097 | (LHS1C && LHS1C->getZExtValue() == 1) && |
| 16098 | (RHSC && RHSC->getZExtValue() == 0)) { |
| 16099 | return DAG.getNode( |
| 16100 | ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2), |
| 16101 | LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4)); |
| 16102 | } |
| 16103 | } |
| 16104 | |
| 16105 | return SDValue(); |
| 16106 | } |
| 16107 | |
| 16108 | /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. |
| 16109 | SDValue |
| 16110 | ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { |
| 16111 | SDValue Cmp = N->getOperand(4); |
| 16112 | if (Cmp.getOpcode() != ARMISD::CMPZ) |
| 16113 | // Only looking at EQ and NE cases. |
| 16114 | return SDValue(); |
| 16115 | |
| 16116 | EVT VT = N->getValueType(0); |
| 16117 | SDLoc dl(N); |
| 16118 | SDValue LHS = Cmp.getOperand(0); |
| 16119 | SDValue RHS = Cmp.getOperand(1); |
| 16120 | SDValue FalseVal = N->getOperand(0); |
| 16121 | SDValue TrueVal = N->getOperand(1); |
| 16122 | SDValue ARMcc = N->getOperand(2); |
| 16123 | ARMCC::CondCodes CC = |
| 16124 | (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); |
| 16125 | |
| 16126 | // BFI is only available on V6T2+. |
| 16127 | if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) { |
| 16128 | SDValue R = PerformCMOVToBFICombine(N, DAG); |
| 16129 | if (R) |
| 16130 | return R; |
| 16131 | } |
| 16132 | |
| 16133 | // Simplify |
| 16134 | // mov r1, r0 |
| 16135 | // cmp r1, x |
| 16136 | // mov r0, y |
| 16137 | // moveq r0, x |
| 16138 | // to |
| 16139 | // cmp r0, x |
| 16140 | // movne r0, y |
| 16141 | // |
| 16142 | // mov r1, r0 |
| 16143 | // cmp r1, x |
| 16144 | // mov r0, x |
| 16145 | // movne r0, y |
| 16146 | // to |
| 16147 | // cmp r0, x |
| 16148 | // movne r0, y |
  // FIXME: Turn this into a target-neutral optimization?
| 16150 | SDValue Res; |
| 16151 | if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { |
| 16152 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, |
| 16153 | N->getOperand(3), Cmp); |
| 16154 | } else if (CC == ARMCC::EQ && TrueVal == RHS) { |
| 16155 | SDValue ARMcc; |
| 16156 | SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); |
| 16157 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, |
| 16158 | N->getOperand(3), NewCmp); |
| 16159 | } |
| 16160 | |
| 16161 | // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0)) |
| 16162 | // -> (cmov F T CC CPSR Cmp) |
| 16163 | if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) { |
| 16164 | auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)); |
| 16165 | auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); |
| 16166 | auto *RHSC = dyn_cast<ConstantSDNode>(RHS); |
| 16167 | if ((LHS0C && LHS0C->getZExtValue() == 0) && |
| 16168 | (LHS1C && LHS1C->getZExtValue() == 1) && |
| 16169 | (RHSC && RHSC->getZExtValue() == 0)) { |
| 16170 | return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, |
| 16171 | LHS->getOperand(2), LHS->getOperand(3), |
| 16172 | LHS->getOperand(4)); |
| 16173 | } |
| 16174 | } |
| 16175 | |
| 16176 | if (!VT.isInteger()) |
| 16177 | return SDValue(); |
| 16178 | |
| 16179 | // Materialize a boolean comparison for integers so we can avoid branching. |
| 16180 | if (isNullConstant(FalseVal)) { |
| 16181 | if (CC == ARMCC::EQ && isOneConstant(TrueVal)) { |
| 16182 | if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) { |
| 16183 | // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it |
| 16184 | // right 5 bits will make that 32 be 1, otherwise it will be 0. |
| 16185 | // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5 |
| 16186 | SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); |
| 16187 | Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub), |
| 16188 | DAG.getConstant(5, dl, MVT::i32)); |
| 16189 | } else { |
| 16190 | // CMOV 0, 1, ==, (CMPZ x, y) -> |
| 16191 | // (ADDCARRY (SUB x, y), t:0, t:1) |
| 16192 | // where t = (SUBCARRY 0, (SUB x, y), 0) |
| 16193 | // |
| 16194 | // The SUBCARRY computes 0 - (x - y) and this will give a borrow when |
| 16195 | // x != y. In other words, a carry C == 1 when x == y, C == 0 |
| 16196 | // otherwise. |
| 16197 | // The final ADDCARRY computes |
| 16198 | // x - y + (0 - (x - y)) + C == C |
| 16199 | SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); |
| 16200 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
| 16201 | SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub); |
        // ISD::SUBCARRY returns a borrow, but what we actually want here is
        // the carry.
| 16204 | SDValue Carry = |
| 16205 | DAG.getNode(ISD::SUB, dl, MVT::i32, |
| 16206 | DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1)); |
| 16207 | Res = DAG.getNode(ISD::ADDCARRY, dl, VTs, Sub, Neg, Carry); |
| 16208 | } |
| 16209 | } else if (CC == ARMCC::NE && !isNullConstant(RHS) && |
| 16210 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) { |
| 16211 | // This seems pointless but will allow us to combine it further below. |
| 16212 | // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1 |
| 16213 | SDValue Sub = |
| 16214 | DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS); |
| 16215 | SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR, |
| 16216 | Sub.getValue(1), SDValue()); |
| 16217 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc, |
| 16218 | N->getOperand(3), CPSRGlue.getValue(1)); |
| 16219 | FalseVal = Sub; |
| 16220 | } |
| 16221 | } else if (isNullConstant(TrueVal)) { |
| 16222 | if (CC == ARMCC::EQ && !isNullConstant(RHS) && |
| 16223 | (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) { |
      // This seems pointless but will allow us to combine it further below.
      // Note that we swap == for !=, as this is the dual of the case above.
| 16226 | // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1 |
| 16227 | SDValue Sub = |
| 16228 | DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS); |
| 16229 | SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR, |
| 16230 | Sub.getValue(1), SDValue()); |
| 16231 | Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal, |
| 16232 | DAG.getConstant(ARMCC::NE, dl, MVT::i32), |
| 16233 | N->getOperand(3), CPSRGlue.getValue(1)); |
| 16234 | FalseVal = Sub; |
| 16235 | } |
| 16236 | } |
| 16237 | |
| 16238 | // On Thumb1, the DAG above may be further combined if z is a power of 2 |
| 16239 | // (z == 2 ^ K). |
| 16240 | // CMOV (SUBS x, y), z, !=, (SUBS x, y):1 -> |
| 16241 | // t1 = (USUBO (SUB x, y), 1) |
| 16242 | // t2 = (SUBCARRY (SUB x, y), t1:0, t1:1) |
| 16243 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 |
| 16244 | // |
| 16245 | // This also handles the special case of comparing against zero; it's |
  // essentially the same pattern, except there's no SUBS:
| 16247 | // CMOV x, z, !=, (CMPZ x, 0) -> |
| 16248 | // t1 = (USUBO x, 1) |
| 16249 | // t2 = (SUBCARRY x, t1:0, t1:1) |
| 16250 | // Result = if K != 0 then (SHL t2:0, K) else t2:0 |
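  // For example, when x != y the USUBO does not borrow, so t2 computes
  // (x-y) - ((x-y) - 1) - 0 == 1 and the SHL yields 2^K; when x == y the
  // USUBO borrows, so t2 computes 0 - 0xffffffff - 1 == 0 (mod 2^32).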
| 16251 | const APInt *TrueConst; |
| 16252 | if (Subtarget->isThumb1Only() && CC == ARMCC::NE && |
| 16253 | ((FalseVal.getOpcode() == ARMISD::SUBS && |
| 16254 | FalseVal.getOperand(0) == LHS && FalseVal.getOperand(1) == RHS) || |
| 16255 | (FalseVal == LHS && isNullConstant(RHS))) && |
| 16256 | (TrueConst = isPowerOf2Constant(TrueVal))) { |
| 16257 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); |
| 16258 | unsigned ShiftAmount = TrueConst->logBase2(); |
| 16259 | if (ShiftAmount) |
| 16260 | TrueVal = DAG.getConstant(1, dl, VT); |
| 16261 | SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal); |
| 16262 | Res = DAG.getNode(ISD::SUBCARRY, dl, VTs, FalseVal, Subc, Subc.getValue(1)); |
| 16263 | |
| 16264 | if (ShiftAmount) |
| 16265 | Res = DAG.getNode(ISD::SHL, dl, VT, Res, |
| 16266 | DAG.getConstant(ShiftAmount, dl, MVT::i32)); |
| 16267 | } |
| 16268 | |
| 16269 | if (Res.getNode()) { |
    KnownBits Known = DAG.computeKnownBits(SDValue(N, 0));
| 16271 | // Capture demanded bits information that would be otherwise lost. |
| 16272 | if (Known.Zero == 0xfffffffe) |
| 16273 | Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, |
| 16274 | DAG.getValueType(MVT::i1)); |
| 16275 | else if (Known.Zero == 0xffffff00) |
| 16276 | Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, |
| 16277 | DAG.getValueType(MVT::i8)); |
| 16278 | else if (Known.Zero == 0xffff0000) |
| 16279 | Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, |
| 16280 | DAG.getValueType(MVT::i16)); |
| 16281 | } |
| 16282 | |
| 16283 | return Res; |
| 16284 | } |
| 16285 | |
| 16286 | static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG, |
| 16287 | const ARMSubtarget *ST) { |
| 16288 | SDValue Src = N->getOperand(0); |
| 16289 | EVT DstVT = N->getValueType(0); |
| 16290 | |
| 16291 | // Convert v4f32 bitcast (v4i32 vdup (i32)) -> v4f32 vdup (i32) under MVE. |
| 16292 | if (ST->hasMVEIntegerOps() && Src.getOpcode() == ARMISD::VDUP) { |
| 16293 | EVT SrcVT = Src.getValueType(); |
| 16294 | if (SrcVT.getScalarSizeInBits() == DstVT.getScalarSizeInBits()) |
| 16295 | return DAG.getNode(ARMISD::VDUP, SDLoc(N), DstVT, Src.getOperand(0)); |
| 16296 | } |
| 16297 | |
| 16298 | // We may have a bitcast of something that has already had this bitcast |
| 16299 | // combine performed on it, so skip past any VECTOR_REG_CASTs. |
| 16300 | while (Src.getOpcode() == ARMISD::VECTOR_REG_CAST) |
| 16301 | Src = Src.getOperand(0); |
| 16302 | |
| 16303 | // Bitcast from element-wise VMOV or VMVN doesn't need VREV if the VREV that |
| 16304 | // would be generated is at least the width of the element type. |
| 16305 | EVT SrcVT = Src.getValueType(); |
| 16306 | if ((Src.getOpcode() == ARMISD::VMOVIMM || |
| 16307 | Src.getOpcode() == ARMISD::VMVNIMM || |
| 16308 | Src.getOpcode() == ARMISD::VMOVFPIMM) && |
| 16309 | SrcVT.getScalarSizeInBits() <= DstVT.getScalarSizeInBits() && |
| 16310 | DAG.getDataLayout().isBigEndian()) |
| 16311 | return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(N), DstVT, Src); |
| 16312 | |
| 16313 | return SDValue(); |
| 16314 | } |
| 16315 | |
| 16316 | SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, |
| 16317 | DAGCombinerInfo &DCI) const { |
| 16318 | switch (N->getOpcode()) { |
| 16319 | default: break; |
| 16320 | case ISD::SELECT_CC: |
| 16321 | case ISD::SELECT: return PerformSELECTCombine(N, DCI, Subtarget); |
| 16322 | case ISD::VSELECT: return PerformVSELECTCombine(N, DCI, Subtarget); |
| 16323 | case ISD::ABS: return PerformABSCombine(N, DCI, Subtarget); |
| 16324 | case ARMISD::ADDE: return PerformADDECombine(N, DCI, Subtarget); |
| 16325 | case ARMISD::UMLAL: return PerformUMLALCombine(N, DCI.DAG, Subtarget); |
| 16326 | case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); |
| 16327 | case ISD::SUB: return PerformSUBCombine(N, DCI, Subtarget); |
| 16328 | case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); |
| 16329 | case ISD::OR: return PerformORCombine(N, DCI, Subtarget); |
| 16330 | case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); |
| 16331 | case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); |
| 16332 | case ISD::BRCOND: |
| 16333 | case ISD::BR_CC: return PerformHWLoopCombine(N, DCI, Subtarget); |
| 16334 | case ARMISD::ADDC: |
| 16335 | case ARMISD::SUBC: return PerformAddcSubcCombine(N, DCI, Subtarget); |
| 16336 | case ARMISD::SUBE: return PerformAddeSubeCombine(N, DCI, Subtarget); |
| 16337 | case ARMISD::BFI: return PerformBFICombine(N, DCI); |
| 16338 | case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget); |
| 16339 | case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); |
| 16340 | case ARMISD::VMOVhr: return PerformVMOVhrCombine(N, DCI); |
| 16341 | case ARMISD::VMOVrh: return PerformVMOVrhCombine(N, DCI); |
| 16342 | case ISD::STORE: return PerformSTORECombine(N, DCI, Subtarget); |
| 16343 | case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget); |
| 16344 | case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); |
| 16345 | case ISD::EXTRACT_VECTOR_ELT: return PerformExtractEltCombine(N, DCI); |
| 16346 | case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); |
| 16347 | case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI, Subtarget); |
| 16348 | case ARMISD::VDUP: return PerformVDUPCombine(N, DCI, Subtarget); |
| 16349 | case ISD::FP_TO_SINT: |
| 16350 | case ISD::FP_TO_UINT: |
| 16351 | return PerformVCVTCombine(N, DCI.DAG, Subtarget); |
| 16352 | case ISD::FDIV: |
| 16353 | return PerformVDIVCombine(N, DCI.DAG, Subtarget); |
| 16354 | case ISD::INTRINSIC_WO_CHAIN: |
| 16355 | return PerformIntrinsicCombine(N, DCI); |
| 16356 | case ISD::SHL: |
| 16357 | case ISD::SRA: |
| 16358 | case ISD::SRL: |
| 16359 | return PerformShiftCombine(N, DCI, Subtarget); |
| 16360 | case ISD::SIGN_EXTEND: |
| 16361 | case ISD::ZERO_EXTEND: |
| 16362 | case ISD::ANY_EXTEND: |
| 16363 | return PerformExtendCombine(N, DCI.DAG, Subtarget); |
| 16364 | case ISD::FP_EXTEND: |
| 16365 | return PerformFPExtendCombine(N, DCI.DAG, Subtarget); |
| 16366 | case ISD::SMIN: |
| 16367 | case ISD::UMIN: |
| 16368 | case ISD::SMAX: |
| 16369 | case ISD::UMAX: |
| 16370 | return PerformMinMaxCombine(N, DCI.DAG, Subtarget); |
| 16371 | case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG); |
| 16372 | case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG); |
| 16373 | case ISD::LOAD: return PerformLOADCombine(N, DCI); |
| 16374 | case ARMISD::VLD1DUP: |
| 16375 | case ARMISD::VLD2DUP: |
| 16376 | case ARMISD::VLD3DUP: |
| 16377 | case ARMISD::VLD4DUP: |
| 16378 | return PerformVLDCombine(N, DCI); |
| 16379 | case ARMISD::BUILD_VECTOR: |
| 16380 | return PerformARMBUILD_VECTORCombine(N, DCI); |
| 16381 | case ISD::BITCAST: |
| 16382 | return PerformBITCASTCombine(N, DCI.DAG, Subtarget); |
| 16383 | case ARMISD::PREDICATE_CAST: |
| 16384 | return PerformPREDICATE_CASTCombine(N, DCI); |
| 16385 | case ARMISD::VECTOR_REG_CAST: |
| 16386 | return PerformVECTOR_REG_CASTCombine(N, DCI, Subtarget); |
| 16387 | case ARMISD::VCMP: |
| 16388 | return PerformVCMPCombine(N, DCI, Subtarget); |
| 16389 | case ISD::VECREDUCE_ADD: |
| 16390 | return PerformVECREDUCE_ADDCombine(N, DCI.DAG, Subtarget); |
| 16391 | case ARMISD::VMOVN: |
| 16392 | return PerformVMOVNCombine(N, DCI); |
| 16393 | case ARMISD::VQMOVNs: |
| 16394 | case ARMISD::VQMOVNu: |
| 16395 | return PerformVQMOVNCombine(N, DCI); |
| 16396 | case ARMISD::ASRL: |
| 16397 | case ARMISD::LSRL: |
| 16398 | case ARMISD::LSLL: |
| 16399 | return PerformLongShiftCombine(N, DCI.DAG); |
| 16400 | case ARMISD::SMULWB: { |
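    // SMULWB multiplies by the bottom halfword of its second operand, so only
    // the low 16 bits of that operand are demanded.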
| 16401 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); |
| 16402 | APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16); |
| 16403 | if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) |
| 16404 | return SDValue(); |
| 16405 | break; |
| 16406 | } |
| 16407 | case ARMISD::SMULWT: { |
| 16408 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); |
| 16409 | APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16); |
| 16410 | if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) |
| 16411 | return SDValue(); |
| 16412 | break; |
| 16413 | } |
| 16414 | case ARMISD::SMLALBB: |
| 16415 | case ARMISD::QADD16b: |
| 16416 | case ARMISD::QSUB16b: { |
| 16417 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); |
| 16418 | APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16); |
| 16419 | if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || |
| 16420 | (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) |
| 16421 | return SDValue(); |
| 16422 | break; |
| 16423 | } |
| 16424 | case ARMISD::SMLALBT: { |
| 16425 | unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits(); |
| 16426 | APInt LowMask = APInt::getLowBitsSet(LowWidth, 16); |
| 16427 | unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits(); |
| 16428 | APInt HighMask = APInt::getHighBitsSet(HighWidth, 16); |
| 16429 | if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) || |
| 16430 | (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI))) |
| 16431 | return SDValue(); |
| 16432 | break; |
| 16433 | } |
| 16434 | case ARMISD::SMLALTB: { |
| 16435 | unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits(); |
| 16436 | APInt HighMask = APInt::getHighBitsSet(HighWidth, 16); |
| 16437 | unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits(); |
| 16438 | APInt LowMask = APInt::getLowBitsSet(LowWidth, 16); |
| 16439 | if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) || |
| 16440 | (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI))) |
| 16441 | return SDValue(); |
| 16442 | break; |
| 16443 | } |
| 16444 | case ARMISD::SMLALTT: { |
| 16445 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); |
| 16446 | APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16); |
| 16447 | if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || |
| 16448 | (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) |
| 16449 | return SDValue(); |
| 16450 | break; |
| 16451 | } |
| 16452 | case ARMISD::QADD8b: |
| 16453 | case ARMISD::QSUB8b: { |
| 16454 | unsigned BitWidth = N->getValueType(0).getSizeInBits(); |
| 16455 | APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8); |
| 16456 | if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || |
| 16457 | (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) |
| 16458 | return SDValue(); |
| 16459 | break; |
| 16460 | } |
| 16461 | case ISD::INTRINSIC_VOID: |
| 16462 | case ISD::INTRINSIC_W_CHAIN: |
| 16463 | switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { |
| 16464 | case Intrinsic::arm_neon_vld1: |
| 16465 | case Intrinsic::arm_neon_vld1x2: |
| 16466 | case Intrinsic::arm_neon_vld1x3: |
| 16467 | case Intrinsic::arm_neon_vld1x4: |
| 16468 | case Intrinsic::arm_neon_vld2: |
| 16469 | case Intrinsic::arm_neon_vld3: |
| 16470 | case Intrinsic::arm_neon_vld4: |
| 16471 | case Intrinsic::arm_neon_vld2lane: |
| 16472 | case Intrinsic::arm_neon_vld3lane: |
| 16473 | case Intrinsic::arm_neon_vld4lane: |
| 16474 | case Intrinsic::arm_neon_vld2dup: |
| 16475 | case Intrinsic::arm_neon_vld3dup: |
| 16476 | case Intrinsic::arm_neon_vld4dup: |
| 16477 | case Intrinsic::arm_neon_vst1: |
| 16478 | case Intrinsic::arm_neon_vst1x2: |
| 16479 | case Intrinsic::arm_neon_vst1x3: |
| 16480 | case Intrinsic::arm_neon_vst1x4: |
| 16481 | case Intrinsic::arm_neon_vst2: |
| 16482 | case Intrinsic::arm_neon_vst3: |
| 16483 | case Intrinsic::arm_neon_vst4: |
| 16484 | case Intrinsic::arm_neon_vst2lane: |
| 16485 | case Intrinsic::arm_neon_vst3lane: |
| 16486 | case Intrinsic::arm_neon_vst4lane: |
| 16487 | return PerformVLDCombine(N, DCI); |
| 16488 | case Intrinsic::arm_mve_vld2q: |
| 16489 | case Intrinsic::arm_mve_vld4q: |
| 16490 | case Intrinsic::arm_mve_vst2q: |
| 16491 | case Intrinsic::arm_mve_vst4q: |
| 16492 | return PerformMVEVLDCombine(N, DCI); |
| 16493 | default: break; |
| 16494 | } |
| 16495 | break; |
| 16496 | } |
| 16497 | return SDValue(); |
| 16498 | } |
| 16499 | |
| 16500 | bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, |
| 16501 | EVT VT) const { |
| 16502 | return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); |
| 16503 | } |
| 16504 | |
| 16505 | bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, |
| 16506 | unsigned Alignment, |
| 16507 | MachineMemOperand::Flags, |
| 16508 | bool *Fast) const { |
  // Depends on what it gets converted into if the type is weird.
| 16510 | if (!VT.isSimple()) |
| 16511 | return false; |
| 16512 | |
  // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
| 16514 | bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); |
| 16515 | auto Ty = VT.getSimpleVT().SimpleTy; |
| 16516 | |
| 16517 | if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) { |
    // Unaligned access can use (for example) LDRB, LDRH, LDR.
| 16519 | if (AllowsUnaligned) { |
| 16520 | if (Fast) |
| 16521 | *Fast = Subtarget->hasV7Ops(); |
| 16522 | return true; |
| 16523 | } |
| 16524 | } |
| 16525 | |
| 16526 | if (Ty == MVT::f64 || Ty == MVT::v2f64) { |
    // For any little-endian target with NEON, we can support unaligned ld/st
    // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
    // A big-endian target may also explicitly support unaligned accesses.
| 16530 | if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) { |
| 16531 | if (Fast) |
| 16532 | *Fast = true; |
| 16533 | return true; |
| 16534 | } |
| 16535 | } |
| 16536 | |
| 16537 | if (!Subtarget->hasMVEIntegerOps()) |
| 16538 | return false; |
| 16539 | |
| 16540 | // These are for predicates |
| 16541 | if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1)) { |
| 16542 | if (Fast) |
| 16543 | *Fast = true; |
| 16544 | return true; |
| 16545 | } |
| 16546 | |
| 16547 | // These are for truncated stores/narrowing loads. They are fine so long as |
| 16548 | // the alignment is at least the size of the item being loaded |
| 16549 | if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) && |
| 16550 | Alignment >= VT.getScalarSizeInBits() / 8) { |
| 16551 | if (Fast) |
| 16552 | *Fast = true; |
| 16553 | return true; |
| 16554 | } |
| 16555 | |
| 16556 | // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and |
| 16557 | // VSTRW.U32 all store the vector register in exactly the same format, and |
| 16558 | // differ only in the range of their immediate offset field and the required |
| 16559 | // alignment. So there is always a store that can be used, regardless of |
| 16560 | // actual type. |
| 16561 | // |
  // For big endian, that is not the case, but we can still emit a (VSTRB.U8;
| 16563 | // VREV64.8) pair and get the same effect. This will likely be better than |
| 16564 | // aligning the vector through the stack. |
| 16565 | if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 || |
| 16566 | Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 || |
| 16567 | Ty == MVT::v2f64) { |
| 16568 | if (Fast) |
| 16569 | *Fast = true; |
| 16570 | return true; |
| 16571 | } |
| 16572 | |
| 16573 | return false; |
| 16574 | } |
| 16575 | |
| 16576 | |
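// For example, a 16-byte memcpy on a little-endian NEON target can be lowered
// as a single v2f64 (VLD1/VST1) copy rather than a sequence of scalar loads
// and stores.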
| 16577 | EVT ARMTargetLowering::getOptimalMemOpType( |
| 16578 | const MemOp &Op, const AttributeList &FuncAttributes) const { |
| 16579 | // See if we can use NEON instructions for this... |
| 16580 | if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() && |
| 16581 | !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) { |
| 16582 | bool Fast; |
| 16583 | if (Op.size() >= 16 && |
| 16584 | (Op.isAligned(Align(16)) || |
| 16585 | (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1, |
| 16586 | MachineMemOperand::MONone, &Fast) && |
| 16587 | Fast))) { |
| 16588 | return MVT::v2f64; |
| 16589 | } else if (Op.size() >= 8 && |
| 16590 | (Op.isAligned(Align(8)) || |
| 16591 | (allowsMisalignedMemoryAccesses( |
| 16592 | MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) && |
| 16593 | Fast))) { |
| 16594 | return MVT::f64; |
| 16595 | } |
| 16596 | } |
| 16597 | |
| 16598 | // Let the target-independent logic figure it out. |
| 16599 | return MVT::Other; |
| 16600 | } |
| 16601 | |
| 16602 | // 64-bit integers are split into their high and low parts and held in two |
| 16603 | // different registers, so the trunc is free since the low register can just |
| 16604 | // be used. |
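// For example, an i64 held in a GPR pair truncates to i32 simply by using the
// register holding the low word; no instruction is needed.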
| 16605 | bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { |
| 16606 | if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) |
| 16607 | return false; |
| 16608 | unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); |
| 16609 | unsigned DestBits = DstTy->getPrimitiveSizeInBits(); |
| 16610 | return (SrcBits == 64 && DestBits == 32); |
| 16611 | } |
| 16612 | |
| 16613 | bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { |
| 16614 | if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() || |
| 16615 | !DstVT.isInteger()) |
| 16616 | return false; |
| 16617 | unsigned SrcBits = SrcVT.getSizeInBits(); |
| 16618 | unsigned DestBits = DstVT.getSizeInBits(); |
| 16619 | return (SrcBits == 64 && DestBits == 32); |
| 16620 | } |
| 16621 | |
| 16622 | bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { |
| 16623 | if (Val.getOpcode() != ISD::LOAD) |
| 16624 | return false; |
| 16625 | |
| 16626 | EVT VT1 = Val.getValueType(); |
| 16627 | if (!VT1.isSimple() || !VT1.isInteger() || |
| 16628 | !VT2.isSimple() || !VT2.isInteger()) |
| 16629 | return false; |
| 16630 | |
| 16631 | switch (VT1.getSimpleVT().SimpleTy) { |
| 16632 | default: break; |
| 16633 | case MVT::i1: |
| 16634 | case MVT::i8: |
| 16635 | case MVT::i16: |
| 16636 | // 8-bit and 16-bit loads implicitly zero-extend to 32-bits. |
| 16637 | return true; |
| 16638 | } |
| 16639 | |
| 16640 | return false; |
| 16641 | } |
| 16642 | |
| 16643 | bool ARMTargetLowering::isFNegFree(EVT VT) const { |
| 16644 | if (!VT.isSimple()) |
| 16645 | return false; |
| 16646 | |
  // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that
  // can negate values directly (fneg is free). So we don't want to let the
  // DAG combiner rewrite fneg into xors and some other instructions. For f16
  // and FullFP16 argument passing, some bitcast nodes may be introduced,
  // triggering this DAG combine rewrite, so we avoid that here.
| 16652 | switch (VT.getSimpleVT().SimpleTy) { |
| 16653 | default: break; |
| 16654 | case MVT::f16: |
| 16655 | return Subtarget->hasFullFP16(); |
| 16656 | } |
| 16657 | |
| 16658 | return false; |
| 16659 | } |
| 16660 | |
| 16661 | /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth |
| 16662 | /// of the vector elements. |
static bool areExtractExts(Value *Ext1, Value *Ext2) {
| 16664 | auto areExtDoubled = [](Instruction *Ext) { |
| 16665 | return Ext->getType()->getScalarSizeInBits() == |
| 16666 | 2 * Ext->getOperand(0)->getType()->getScalarSizeInBits(); |
| 16667 | }; |
| 16668 | |
| 16669 | if (!match(Ext1, m_ZExtOrSExt(m_Value())) || |
| 16670 | !match(Ext2, m_ZExtOrSExt(m_Value())) || |
| 16671 | !areExtDoubled(cast<Instruction>(Ext1)) || |
| 16672 | !areExtDoubled(cast<Instruction>(Ext2))) |
| 16673 | return false; |
| 16674 | |
| 16675 | return true; |
| 16676 | } |
| 16677 | |
| 16678 | /// Check if sinking \p I's operands to I's basic block is profitable, because |
| 16679 | /// the operands can be folded into a target instruction, e.g. |
| 16680 | /// sext/zext can be folded into vsubl. |
| 16681 | bool ARMTargetLowering::shouldSinkOperands(Instruction *I, |
| 16682 | SmallVectorImpl<Use *> &Ops) const { |
| 16683 | if (!I->getType()->isVectorTy()) |
| 16684 | return false; |
| 16685 | |
| 16686 | if (Subtarget->hasNEON()) { |
| 16687 | switch (I->getOpcode()) { |
| 16688 | case Instruction::Sub: |
| 16689 | case Instruction::Add: { |
| 16690 | if (!areExtractExts(I->getOperand(0), I->getOperand(1))) |
| 16691 | return false; |
| 16692 | Ops.push_back(&I->getOperandUse(0)); |
| 16693 | Ops.push_back(&I->getOperandUse(1)); |
| 16694 | return true; |
| 16695 | } |
| 16696 | default: |
| 16697 | return false; |
| 16698 | } |
| 16699 | } |
| 16700 | |
| 16701 | if (!Subtarget->hasMVEIntegerOps()) |
| 16702 | return false; |
| 16703 | |
| 16704 | auto IsFMSMul = [&](Instruction *I) { |
| 16705 | if (!I->hasOneUse()) |
| 16706 | return false; |
| 16707 | auto *Sub = cast<Instruction>(*I->users().begin()); |
| 16708 | return Sub->getOpcode() == Instruction::FSub && Sub->getOperand(1) == I; |
| 16709 | }; |
| 16710 | auto IsFMS = [&](Instruction *I) { |
| 16711 | if (match(I->getOperand(0), m_FNeg(m_Value())) || |
| 16712 | match(I->getOperand(1), m_FNeg(m_Value()))) |
| 16713 | return true; |
| 16714 | return false; |
| 16715 | }; |
| 16716 | |
| 16717 | auto IsSinker = [&](Instruction *I, int Operand) { |
| 16718 | switch (I->getOpcode()) { |
| 16719 | case Instruction::Add: |
| 16720 | case Instruction::Mul: |
| 16721 | case Instruction::FAdd: |
| 16722 | case Instruction::ICmp: |
| 16723 | case Instruction::FCmp: |
| 16724 | return true; |
| 16725 | case Instruction::FMul: |
| 16726 | return !IsFMSMul(I); |
| 16727 | case Instruction::Sub: |
| 16728 | case Instruction::FSub: |
| 16729 | case Instruction::Shl: |
| 16730 | case Instruction::LShr: |
| 16731 | case Instruction::AShr: |
| 16732 | return Operand == 1; |
| 16733 | case Instruction::Call: |
| 16734 | if (auto *II = dyn_cast<IntrinsicInst>(I)) { |
| 16735 | switch (II->getIntrinsicID()) { |
| 16736 | case Intrinsic::fma: |
| 16737 | return !IsFMS(I); |
| 16738 | case Intrinsic::arm_mve_add_predicated: |
| 16739 | case Intrinsic::arm_mve_mul_predicated: |
| 16740 | case Intrinsic::arm_mve_qadd_predicated: |
| 16741 | case Intrinsic::arm_mve_hadd_predicated: |
| 16742 | case Intrinsic::arm_mve_vqdmull_predicated: |
| 16743 | case Intrinsic::arm_mve_qdmulh_predicated: |
| 16744 | case Intrinsic::arm_mve_qrdmulh_predicated: |
| 16745 | case Intrinsic::arm_mve_fma_predicated: |
| 16746 | return true; |
| 16747 | case Intrinsic::arm_mve_sub_predicated: |
| 16748 | case Intrinsic::arm_mve_qsub_predicated: |
| 16749 | case Intrinsic::arm_mve_hsub_predicated: |
| 16750 | return Operand == 1; |
| 16751 | default: |
| 16752 | return false; |
| 16753 | } |
| 16754 | } |
| 16755 | return false; |
| 16756 | default: |
| 16757 | return false; |
| 16758 | } |
| 16759 | }; |
| 16760 | |
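  // Look for splats (optionally hidden behind a bitcast) feeding sinkable
  // operands: e.g. a (shufflevector (insertelement undef, %x, 0), undef,
  // zeroinitializer) splat used by an add can be sunk next to it, letting
  // instruction selection fold the scalar into a vdup-style operand.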
| 16761 | for (auto OpIdx : enumerate(I->operands())) { |
| 16762 | Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get()); |
| 16763 | // Make sure we are not already sinking this operand |
| 16764 | if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; })) |
| 16765 | continue; |
| 16766 | |
| 16767 | Instruction *Shuffle = Op; |
| 16768 | if (Shuffle->getOpcode() == Instruction::BitCast) |
| 16769 | Shuffle = dyn_cast<Instruction>(Shuffle->getOperand(0)); |
| 16770 | // We are looking for a splat that can be sunk. |
| 16771 | if (!Shuffle || |
| 16772 | !match(Shuffle, m_Shuffle( |
| 16773 | m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()), |
| 16774 | m_Undef(), m_ZeroMask()))) |
| 16775 | continue; |
| 16776 | if (!IsSinker(I, OpIdx.index())) |
| 16777 | continue; |
| 16778 | |
    // All uses of the shuffle should be sunk to avoid duplicating it across
    // GPR and vector registers.
| 16781 | for (Use &U : Op->uses()) { |
| 16782 | Instruction *Insn = cast<Instruction>(U.getUser()); |
| 16783 | if (!IsSinker(Insn, U.getOperandNo())) |
| 16784 | return false; |
| 16785 | } |
| 16786 | |
| 16787 | Ops.push_back(&Shuffle->getOperandUse(0)); |
| 16788 | if (Shuffle != Op) |
| 16789 | Ops.push_back(&Op->getOperandUse(0)); |
| 16790 | Ops.push_back(&OpIdx.value()); |
| 16791 | } |
| 16792 | return true; |
| 16793 | } |
| 16794 | |
| 16795 | Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const { |
| 16796 | if (!Subtarget->hasMVEIntegerOps()) |
| 16797 | return nullptr; |
| 16798 | Type *SVIType = SVI->getType(); |
| 16799 | Type *ScalarType = SVIType->getScalarType(); |
| 16800 | |
| 16801 | if (ScalarType->isFloatTy()) |
| 16802 | return Type::getInt32Ty(SVIType->getContext()); |
| 16803 | if (ScalarType->isHalfTy()) |
| 16804 | return Type::getInt16Ty(SVIType->getContext()); |
| 16805 | return nullptr; |
| 16806 | } |
| 16807 | |
| 16808 | bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { |
| 16809 | EVT VT = ExtVal.getValueType(); |
| 16810 | |
| 16811 | if (!isTypeLegal(VT)) |
| 16812 | return false; |
| 16813 | |
| 16814 | if (auto *Ld = dyn_cast<MaskedLoadSDNode>(ExtVal.getOperand(0))) { |
| 16815 | if (Ld->isExpandingLoad()) |
| 16816 | return false; |
| 16817 | } |
| 16818 | |
| 16819 | if (Subtarget->hasMVEIntegerOps()) |
| 16820 | return true; |
| 16821 | |
| 16822 | // Don't create a loadext if we can fold the extension into a wide/long |
| 16823 | // instruction. |
| 16824 | // If there's more than one user instruction, the loadext is desirable no |
| 16825 | // matter what. There can be two uses by the same instruction. |
| 16826 | if (ExtVal->use_empty() || |
| 16827 | !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode())) |
| 16828 | return true; |
| 16829 | |
| 16830 | SDNode *U = *ExtVal->use_begin(); |
| 16831 | if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB || |
| 16832 | U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM)) |
| 16833 | return false; |
| 16834 | |
| 16835 | return true; |
| 16836 | } |
| 16837 | |
| 16838 | bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { |
| 16839 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) |
| 16840 | return false; |
| 16841 | |
| 16842 | if (!isTypeLegal(EVT::getEVT(Ty1))) |
| 16843 | return false; |
| 16844 | |
  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
| 16846 | |
| 16847 | // Assuming the caller doesn't have a zeroext or signext return parameter, |
| 16848 | // truncation all the way down to i1 is valid. |
| 16849 | return true; |
| 16850 | } |
| 16851 | |
| 16852 | int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL, |
| 16853 | const AddrMode &AM, Type *Ty, |
| 16854 | unsigned AS) const { |
| 16855 | if (isLegalAddressingMode(DL, AM, Ty, AS)) { |
| 16856 | if (Subtarget->hasFPAO()) |
| 16857 | return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster |
| 16858 | return 0; |
| 16859 | } |
| 16860 | return -1; |
| 16861 | } |
| 16862 | |
| 16863 | /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster |
| 16864 | /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be |
| 16865 | /// expanded to FMAs when this method returns true, otherwise fmuladd is |
| 16866 | /// expanded to fmul + fadd. |
| 16867 | /// |
| 16868 | /// ARM supports both fused and unfused multiply-add operations; we already |
| 16869 | /// lower a pair of fmul and fadd to the latter so it's not clear that there |
| 16870 | /// would be a gain or that the gain would be worthwhile enough to risk |
| 16871 | /// correctness bugs. |
| 16872 | /// |
| 16873 | /// For MVE, we set this to true as it helps simplify the need for some |
| 16874 | /// patterns (and we don't have the non-fused floating point instruction). |
| 16875 | bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, |
| 16876 | EVT VT) const { |
| 16877 | if (!VT.isSimple()) |
| 16878 | return false; |
| 16879 | |
| 16880 | switch (VT.getSimpleVT().SimpleTy) { |
| 16881 | case MVT::v4f32: |
| 16882 | case MVT::v8f16: |
| 16883 | return Subtarget->hasMVEFloatOps(); |
| 16884 | case MVT::f16: |
| 16885 | return Subtarget->useFPVFMx16(); |
| 16886 | case MVT::f32: |
| 16887 | return Subtarget->useFPVFMx(); |
| 16888 | case MVT::f64: |
| 16889 | return Subtarget->useFPVFMx64(); |
| 16890 | default: |
| 16891 | break; |
| 16892 | } |
| 16893 | |
| 16894 | return false; |
| 16895 | } |
| 16896 | |
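// Thumb1 load/store offsets are unsigned 5-bit immediates scaled by the
// access size, so e.g. an LDR accepts byte offsets 0, 4, ..., 124 and an
// LDRH accepts 0, 2, ..., 62.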
| 16897 | static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { |
| 16898 | if (V < 0) |
| 16899 | return false; |
| 16900 | |
| 16901 | unsigned Scale = 1; |
| 16902 | switch (VT.getSimpleVT().SimpleTy) { |
| 16903 | case MVT::i1: |
| 16904 | case MVT::i8: |
| 16905 | // Scale == 1; |
| 16906 | break; |
| 16907 | case MVT::i16: |
| 16908 | // Scale == 2; |
| 16909 | Scale = 2; |
| 16910 | break; |
| 16911 | default: |
    // On Thumb1 we load most things (i32, i64, floats, etc.) with an LDR.
| 16913 | // Scale == 4; |
| 16914 | Scale = 4; |
| 16915 | break; |
| 16916 | } |
| 16917 | |
| 16918 | if ((V & (Scale - 1)) != 0) |
| 16919 | return false; |
| 16920 | return isUInt<5>(V / Scale); |
| 16921 | } |
| 16922 | |
| 16923 | static bool isLegalT2AddressImmediate(int64_t V, EVT VT, |
| 16924 | const ARMSubtarget *Subtarget) { |
| 16925 | if (!VT.isInteger() && !VT.isFloatingPoint()) |
| 16926 | return false; |
| 16927 | if (VT.isVector() && Subtarget->hasNEON()) |
| 16928 | return false; |
| 16929 | if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() && |
| 16930 | !Subtarget->hasMVEFloatOps()) |
| 16931 | return false; |
| 16932 | |
| 16933 | bool IsNeg = false; |
| 16934 | if (V < 0) { |
| 16935 | IsNeg = true; |
| 16936 | V = -V; |
| 16937 | } |
| 16938 | |
| 16939 | unsigned NumBytes = std::max((unsigned)VT.getSizeInBits() / 8, 1U); |
| 16940 | |
| 16941 | // MVE: size * imm7 |
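  // For example, a v4i32 VLDRW accepts offsets that are multiples of 4 up to
  // +/-508 (a 7-bit immediate shifted left by 2).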
| 16942 | if (VT.isVector() && Subtarget->hasMVEIntegerOps()) { |
| 16943 | switch (VT.getSimpleVT().getVectorElementType().SimpleTy) { |
| 16944 | case MVT::i32: |
| 16945 | case MVT::f32: |
| 16946 | return isShiftedUInt<7,2>(V); |
| 16947 | case MVT::i16: |
| 16948 | case MVT::f16: |
| 16949 | return isShiftedUInt<7,1>(V); |
| 16950 | case MVT::i8: |
| 16951 | return isUInt<7>(V); |
| 16952 | default: |
| 16953 | return false; |
| 16954 | } |
| 16955 | } |
| 16956 | |
| 16957 | // half VLDR: 2 * imm8 |
| 16958 | if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16()) |
| 16959 | return isShiftedUInt<8, 1>(V); |
| 16960 | // VLDR and LDRD: 4 * imm8 |
| 16961 | if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8) |
| 16962 | return isShiftedUInt<8, 2>(V); |
| 16963 | |
| 16964 | if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) { |
| 16965 | // + imm12 or - imm8 |
| 16966 | if (IsNeg) |
| 16967 | return isUInt<8>(V); |
| 16968 | return isUInt<12>(V); |
| 16969 | } |
| 16970 | |
| 16971 | return false; |
| 16972 | } |
| 16973 | |
| 16974 | /// isLegalAddressImmediate - Return true if the integer value can be used |
| 16975 | /// as the offset of the target addressing mode for load / store of the |
| 16976 | /// given type. |
| 16977 | static bool isLegalAddressImmediate(int64_t V, EVT VT, |
| 16978 | const ARMSubtarget *Subtarget) { |
| 16979 | if (V == 0) |
| 16980 | return true; |
| 16981 | |
| 16982 | if (!VT.isSimple()) |
| 16983 | return false; |
| 16984 | |
| 16985 | if (Subtarget->isThumb1Only()) |
| 16986 | return isLegalT1AddressImmediate(V, VT); |
| 16987 | else if (Subtarget->isThumb2()) |
| 16988 | return isLegalT2AddressImmediate(V, VT, Subtarget); |
| 16989 | |
| 16990 | // ARM mode. |
| 16991 | if (V < 0) |
    V = -V;
| 16993 | switch (VT.getSimpleVT().SimpleTy) { |
| 16994 | default: return false; |
| 16995 | case MVT::i1: |
| 16996 | case MVT::i8: |
| 16997 | case MVT::i32: |
| 16998 | // +- imm12 |
| 16999 | return isUInt<12>(V); |
| 17000 | case MVT::i16: |
| 17001 | // +- imm8 |
| 17002 | return isUInt<8>(V); |
| 17003 | case MVT::f32: |
| 17004 | case MVT::f64: |
| 17005 | if (!Subtarget->hasVFP2Base()) // FIXME: NEON? |
| 17006 | return false; |
| 17007 | return isShiftedUInt<8, 2>(V); |
| 17008 | } |
| 17009 | } |
| 17010 | |
| 17011 | bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, |
| 17012 | EVT VT) const { |
| 17013 | int Scale = AM.Scale; |
| 17014 | if (Scale < 0) |
| 17015 | return false; |
| 17016 | |
| 17017 | switch (VT.getSimpleVT().SimpleTy) { |
| 17018 | default: return false; |
| 17019 | case MVT::i1: |
| 17020 | case MVT::i8: |
| 17021 | case MVT::i16: |
| 17022 | case MVT::i32: |
| 17023 | if (Scale == 1) |
| 17024 | return true; |
| 17025 | // r + r << imm |
| 17026 | Scale = Scale & ~1; |
| 17027 | return Scale == 2 || Scale == 4 || Scale == 8; |
| 17028 | case MVT::i64: |
| 17029 | // FIXME: What are we trying to model here? ldrd doesn't have an r + r |
| 17030 | // version in Thumb mode. |
| 17031 | // r + r |
| 17032 | if (Scale == 1) |
| 17033 | return true; |
| 17034 | // r * 2 (this can be lowered to r + r). |
| 17035 | if (!AM.HasBaseReg && Scale == 2) |
| 17036 | return true; |
| 17037 | return false; |
| 17038 | case MVT::isVoid: |
| 17039 | // Note, we allow "void" uses (basically, uses that aren't loads or |
    // stores), because ARM allows folding a scale into many arithmetic
| 17041 | // operations. This should be made more precise and revisited later. |
| 17042 | |
| 17043 | // Allow r << imm, but the imm has to be a multiple of two. |
| 17044 | if (Scale & 1) return false; |
| 17045 | return isPowerOf2_32(Scale); |
| 17046 | } |
| 17047 | } |
| 17048 | |
| 17049 | bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM, |
| 17050 | EVT VT) const { |
| 17051 | const int Scale = AM.Scale; |
| 17052 | |
| 17053 | // Negative scales are not supported in Thumb1. |
| 17054 | if (Scale < 0) |
| 17055 | return false; |
| 17056 | |
| 17057 | // Thumb1 addressing modes do not support register scaling excepting the |
| 17058 | // following cases: |
| 17059 | // 1. Scale == 1 means no scaling. |
| 17060 | // 2. Scale == 2 this can be lowered to r + r if there is no base register. |
| 17061 | return (Scale == 1) || (!AM.HasBaseReg && Scale == 2); |
| 17062 | } |
| 17063 | |
| 17064 | /// isLegalAddressingMode - Return true if the addressing mode represented |
| 17065 | /// by AM is legal for this target, for a load/store of the specified type. |
| 17066 | bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL, |
| 17067 | const AddrMode &AM, Type *Ty, |
| 17068 | unsigned AS, Instruction *I) const { |
| 17069 | EVT VT = getValueType(DL, Ty, true); |
| 17070 | if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) |
| 17071 | return false; |
| 17072 | |
| 17073 | // Can never fold addr of global into load/store. |
| 17074 | if (AM.BaseGV) |
| 17075 | return false; |
| 17076 | |
| 17077 | switch (AM.Scale) { |
| 17078 | case 0: // no scale reg, must be "r+i" or "r", or "i". |
| 17079 | break; |
| 17080 | default: |
| 17081 | // ARM doesn't support any R+R*scale+imm addr modes. |
| 17082 | if (AM.BaseOffs) |
| 17083 | return false; |
| 17084 | |
| 17085 | if (!VT.isSimple()) |
| 17086 | return false; |
| 17087 | |
| 17088 | if (Subtarget->isThumb1Only()) |
| 17089 | return isLegalT1ScaledAddressingMode(AM, VT); |
| 17090 | |
| 17091 | if (Subtarget->isThumb2()) |
| 17092 | return isLegalT2ScaledAddressingMode(AM, VT); |
| 17093 | |
| 17094 | int Scale = AM.Scale; |
| 17095 | switch (VT.getSimpleVT().SimpleTy) { |
| 17096 | default: return false; |
| 17097 | case MVT::i1: |
| 17098 | case MVT::i8: |
| 17099 | case MVT::i32: |
| 17100 | if (Scale < 0) Scale = -Scale; |
| 17101 | if (Scale == 1) |
| 17102 | return true; |
| 17103 | // r + r << imm |
| 17104 | return isPowerOf2_32(Scale & ~1); |
| 17105 | case MVT::i16: |
| 17106 | case MVT::i64: |
| 17107 | // r +/- r |
| 17108 | if (Scale == 1 || (AM.HasBaseReg && Scale == -1)) |
| 17109 | return true; |
| 17110 | // r * 2 (this can be lowered to r + r). |
| 17111 | if (!AM.HasBaseReg && Scale == 2) |
| 17112 | return true; |
| 17113 | return false; |
| 17114 | |
| 17115 | case MVT::isVoid: |
| 17116 | // Note, we allow "void" uses (basically, uses that aren't loads or |
      // stores), because ARM allows folding a scale into many arithmetic
| 17118 | // operations. This should be made more precise and revisited later. |
| 17119 | |
| 17120 | // Allow r << imm, but the imm has to be a multiple of two. |
| 17121 | if (Scale & 1) return false; |
| 17122 | return isPowerOf2_32(Scale); |
| 17123 | } |
| 17124 | } |
| 17125 | return true; |
| 17126 | } |
| 17127 | |
/// isLegalICmpImmediate - Return true if the specified immediate is a legal
/// icmp immediate, that is, the target has icmp instructions which can
/// compare a register against the immediate without having to materialize
/// the immediate into a register.
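/// For example, "cmp r0, #255" is encodable in all modes, while comparing
/// against -1 relies on cmn (ARM and Thumb2 only); Thumb1 has to materialize
/// such immediates into a register first.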
| 17132 | bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { |
| 17133 | // Thumb2 and ARM modes can use cmn for negative immediates. |
| 17134 | if (!Subtarget->isThumb()) |
| 17135 | return ARM_AM::getSOImmVal((uint32_t)Imm) != -1 || |
| 17136 | ARM_AM::getSOImmVal(-(uint32_t)Imm) != -1; |
| 17137 | if (Subtarget->isThumb2()) |
| 17138 | return ARM_AM::getT2SOImmVal((uint32_t)Imm) != -1 || |
| 17139 | ARM_AM::getT2SOImmVal(-(uint32_t)Imm) != -1; |
  // Thumb1 has no cmn with an immediate, and cmp takes only 8-bit immediates.
| 17141 | return Imm >= 0 && Imm <= 255; |
| 17142 | } |
| 17143 | |
| 17144 | /// isLegalAddImmediate - Return true if the specified immediate is a legal add |
/// *or sub* immediate, that is, the target has add or sub instructions which
/// can add a register with the immediate without having to materialize the
| 17147 | /// immediate into a register. |
| 17148 | bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { |
| 17149 | // Same encoding for add/sub, just flip the sign. |
| 17150 | int64_t AbsImm = std::abs(Imm); |
| 17151 | if (!Subtarget->isThumb()) |
| 17152 | return ARM_AM::getSOImmVal(AbsImm) != -1; |
| 17153 | if (Subtarget->isThumb2()) |
| 17154 | return ARM_AM::getT2SOImmVal(AbsImm) != -1; |
| 17155 | // Thumb1 only has 8-bit unsigned immediate. |
| 17156 | return AbsImm >= 0 && AbsImm <= 255; |
| 17157 | } |
| 17158 | |
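// For example, (load (add r0, #4)) can be selected as the pre-indexed
// "ldr r1, [r0, #4]!", updating the base register as part of the load.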
| 17159 | static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, |
| 17160 | bool isSEXTLoad, SDValue &Base, |
| 17161 | SDValue &Offset, bool &isInc, |
| 17162 | SelectionDAG &DAG) { |
| 17163 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 17164 | return false; |
| 17165 | |
| 17166 | if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { |
| 17167 | // AddressingMode 3 |
| 17168 | Base = Ptr->getOperand(0); |
| 17169 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { |
| 17170 | int RHSC = (int)RHS->getZExtValue(); |
| 17171 | if (RHSC < 0 && RHSC > -256) { |
| 17172 | assert(Ptr->getOpcode() == ISD::ADD); |
| 17173 | isInc = false; |
| 17174 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
| 17175 | return true; |
| 17176 | } |
| 17177 | } |
| 17178 | isInc = (Ptr->getOpcode() == ISD::ADD); |
| 17179 | Offset = Ptr->getOperand(1); |
| 17180 | return true; |
| 17181 | } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { |
| 17182 | // AddressingMode 2 |
| 17183 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { |
| 17184 | int RHSC = (int)RHS->getZExtValue(); |
| 17185 | if (RHSC < 0 && RHSC > -0x1000) { |
| 17186 | assert(Ptr->getOpcode() == ISD::ADD); |
| 17187 | isInc = false; |
| 17188 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
| 17189 | Base = Ptr->getOperand(0); |
| 17190 | return true; |
| 17191 | } |
| 17192 | } |
| 17193 | |
| 17194 | if (Ptr->getOpcode() == ISD::ADD) { |
| 17195 | isInc = true; |
      ARM_AM::ShiftOpc ShOpcVal =
          ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
| 17198 | if (ShOpcVal != ARM_AM::no_shift) { |
| 17199 | Base = Ptr->getOperand(1); |
| 17200 | Offset = Ptr->getOperand(0); |
| 17201 | } else { |
| 17202 | Base = Ptr->getOperand(0); |
| 17203 | Offset = Ptr->getOperand(1); |
| 17204 | } |
| 17205 | return true; |
| 17206 | } |
| 17207 | |
| 17208 | isInc = (Ptr->getOpcode() == ISD::ADD); |
| 17209 | Base = Ptr->getOperand(0); |
| 17210 | Offset = Ptr->getOperand(1); |
| 17211 | return true; |
| 17212 | } |
| 17213 | |
| 17214 | // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. |
| 17215 | return false; |
| 17216 | } |
| 17217 | |
| 17218 | static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, |
| 17219 | bool isSEXTLoad, SDValue &Base, |
| 17220 | SDValue &Offset, bool &isInc, |
| 17221 | SelectionDAG &DAG) { |
| 17222 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 17223 | return false; |
| 17224 | |
| 17225 | Base = Ptr->getOperand(0); |
| 17226 | if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { |
| 17227 | int RHSC = (int)RHS->getZExtValue(); |
| 17228 | if (RHSC < 0 && RHSC > -0x100) { // 8 bits. |
| 17229 | assert(Ptr->getOpcode() == ISD::ADD); |
| 17230 | isInc = false; |
| 17231 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
| 17232 | return true; |
| 17233 | } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. |
| 17234 | isInc = Ptr->getOpcode() == ISD::ADD; |
| 17235 | Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
| 17236 | return true; |
| 17237 | } |
| 17238 | } |
| 17239 | |
| 17240 | return false; |
| 17241 | } |
| 17242 | |
| 17243 | static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment, |
| 17244 | bool isSEXTLoad, bool IsMasked, bool isLE, |
| 17245 | SDValue &Base, SDValue &Offset, |
| 17246 | bool &isInc, SelectionDAG &DAG) { |
| 17247 | if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) |
| 17248 | return false; |
| 17249 | if (!isa<ConstantSDNode>(Ptr->getOperand(1))) |
| 17250 | return false; |
| 17251 | |
| 17252 | // We allow LE non-masked loads to change the type (for example use a vldrb.8 |
| 17253 | // as opposed to a vldrw.32). This can allow extra addressing modes or |
| 17254 | // alignments for what is otherwise an equivalent instruction. |
| 17255 | bool CanChangeType = isLE && !IsMasked; |
| 17256 | |
| 17257 | ConstantSDNode *RHS = cast<ConstantSDNode>(Ptr->getOperand(1)); |
| 17258 | int RHSC = (int)RHS->getZExtValue(); |
| 17259 | |
| 17260 | auto IsInRange = [&](int RHSC, int Limit, int Scale) { |
| 17261 | if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) { |
| 17262 | assert(Ptr->getOpcode() == ISD::ADD); |
| 17263 | isInc = false; |
| 17264 | Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
| 17265 | return true; |
| 17266 | } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) { |
| 17267 | isInc = Ptr->getOpcode() == ISD::ADD; |
| 17268 | Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0)); |
| 17269 | return true; |
| 17270 | } |
| 17271 | return false; |
| 17272 | }; |
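  // For example, a vldrh.16 (Limit == 0x80, Scale == 2) accepts offsets that
  // are even and less than 256 in magnitude.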
| 17273 | |
| 17274 | // Try to find a matching instruction based on s/zext, Alignment, Offset and |
| 17275 | // (in BE/masked) type. |
| 17276 | Base = Ptr->getOperand(0); |
| 17277 | if (VT == MVT::v4i16) { |
| 17278 | if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2)) |
| 17279 | return true; |
| 17280 | } else if (VT == MVT::v4i8 || VT == MVT::v8i8) { |
| 17281 | if (IsInRange(RHSC, 0x80, 1)) |
| 17282 | return true; |
| 17283 | } else if (Alignment >= 4 && |
| 17284 | (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) && |
| 17285 | IsInRange(RHSC, 0x80, 4)) |
| 17286 | return true; |
| 17287 | else if (Alignment >= 2 && |
| 17288 | (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) && |
| 17289 | IsInRange(RHSC, 0x80, 2)) |
| 17290 | return true; |
| 17291 | else if ((CanChangeType || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1)) |
| 17292 | return true; |
| 17293 | return false; |
| 17294 | } |
| 17295 | |
/// getPreIndexedAddressParts - Returns true by value, and sets the base
/// pointer, offset pointer and addressing mode by reference, if the node's
/// address can be legally represented as a pre-indexed load / store address.
| 17299 | bool |
| 17300 | ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, |
| 17301 | SDValue &Offset, |
| 17302 | ISD::MemIndexedMode &AM, |
| 17303 | SelectionDAG &DAG) const { |
| 17304 | if (Subtarget->isThumb1Only()) |
| 17305 | return false; |
| 17306 | |
| 17307 | EVT VT; |
| 17308 | SDValue Ptr; |
| 17309 | Align Alignment; |
| 17310 | bool isSEXTLoad = false; |
| 17311 | bool IsMasked = false; |
| 17312 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { |
| 17313 | Ptr = LD->getBasePtr(); |
| 17314 | VT = LD->getMemoryVT(); |
| 17315 | Alignment = LD->getAlign(); |
| 17316 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 17317 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { |
| 17318 | Ptr = ST->getBasePtr(); |
| 17319 | VT = ST->getMemoryVT(); |
| 17320 | Alignment = ST->getAlign(); |
| 17321 | } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) { |
| 17322 | Ptr = LD->getBasePtr(); |
| 17323 | VT = LD->getMemoryVT(); |
| 17324 | Alignment = LD->getAlign(); |
| 17325 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 17326 | IsMasked = true; |
| 17327 | } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) { |
| 17328 | Ptr = ST->getBasePtr(); |
| 17329 | VT = ST->getMemoryVT(); |
| 17330 | Alignment = ST->getAlign(); |
| 17331 | IsMasked = true; |
| 17332 | } else |
| 17333 | return false; |
| 17334 | |
| 17335 | bool isInc; |
| 17336 | bool isLegal = false; |
| 17337 | if (VT.isVector()) |
| 17338 | isLegal = Subtarget->hasMVEIntegerOps() && |
| 17339 | getMVEIndexedAddressParts( |
| 17340 | Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked, |
| 17341 | Subtarget->isLittle(), Base, Offset, isInc, DAG); |
| 17342 | else { |
| 17343 | if (Subtarget->isThumb2()) |
| 17344 | isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, |
| 17345 | Offset, isInc, DAG); |
| 17346 | else |
| 17347 | isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, |
| 17348 | Offset, isInc, DAG); |
| 17349 | } |
| 17350 | if (!isLegal) |
| 17351 | return false; |
| 17352 | |
| 17353 | AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; |
| 17354 | return true; |
| 17355 | } |
| 17356 | |
/// getPostIndexedAddressParts - Returns true by value, and sets the base
/// pointer, offset pointer and addressing mode by reference, if this node
/// can be combined with a load / store to form a post-indexed load / store.
| 17360 | bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, |
| 17361 | SDValue &Base, |
| 17362 | SDValue &Offset, |
| 17363 | ISD::MemIndexedMode &AM, |
| 17364 | SelectionDAG &DAG) const { |
| 17365 | EVT VT; |
| 17366 | SDValue Ptr; |
| 17367 | Align Alignment; |
| 17368 | bool isSEXTLoad = false, isNonExt; |
| 17369 | bool IsMasked = false; |
| 17370 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { |
| 17371 | VT = LD->getMemoryVT(); |
| 17372 | Ptr = LD->getBasePtr(); |
| 17373 | Alignment = LD->getAlign(); |
| 17374 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 17375 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; |
| 17376 | } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { |
| 17377 | VT = ST->getMemoryVT(); |
| 17378 | Ptr = ST->getBasePtr(); |
| 17379 | Alignment = ST->getAlign(); |
| 17380 | isNonExt = !ST->isTruncatingStore(); |
| 17381 | } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) { |
| 17382 | VT = LD->getMemoryVT(); |
| 17383 | Ptr = LD->getBasePtr(); |
| 17384 | Alignment = LD->getAlign(); |
| 17385 | isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; |
| 17386 | isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; |
| 17387 | IsMasked = true; |
| 17388 | } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) { |
| 17389 | VT = ST->getMemoryVT(); |
| 17390 | Ptr = ST->getBasePtr(); |
| 17391 | Alignment = ST->getAlign(); |
| 17392 | isNonExt = !ST->isTruncatingStore(); |
| 17393 | IsMasked = true; |
| 17394 | } else |
| 17395 | return false; |
| 17396 | |
| 17397 | if (Subtarget->isThumb1Only()) { |
| 17398 | // Thumb-1 can do a limited post-inc load or store as an updating LDM. It |
| 17399 | // must be non-extending/truncating, i32, with an offset of 4. |
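    // For example, the Thumb-1 sequence "ldm r0!, {r1}" loads r1 from [r0]
    // and advances r0 by 4, matching a post-inc i32 load with offset 4.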
    assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
| 17401 | if (Op->getOpcode() != ISD::ADD || !isNonExt) |
| 17402 | return false; |
| 17403 | auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1)); |
| 17404 | if (!RHS || RHS->getZExtValue() != 4) |
| 17405 | return false; |
| 17406 | |
| 17407 | Offset = Op->getOperand(1); |
| 17408 | Base = Op->getOperand(0); |
| 17409 | AM = ISD::POST_INC; |
| 17410 | return true; |
| 17411 | } |
| 17412 | |
| 17413 | bool isInc; |
| 17414 | bool isLegal = false; |
| 17415 | if (VT.isVector()) |
| 17416 | isLegal = Subtarget->hasMVEIntegerOps() && |
| 17417 | getMVEIndexedAddressParts(Op, VT, Alignment, isSEXTLoad, IsMasked, |
| 17418 | Subtarget->isLittle(), Base, Offset, |
| 17419 | isInc, DAG); |
| 17420 | else { |
| 17421 | if (Subtarget->isThumb2()) |
| 17422 | isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, |
| 17423 | isInc, DAG); |
| 17424 | else |
| 17425 | isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, |
| 17426 | isInc, DAG); |
| 17427 | } |
| 17428 | if (!isLegal) |
| 17429 | return false; |
| 17430 | |
| 17431 | if (Ptr != Base) { |
| 17432 | // Swap base ptr and offset to catch more post-index load / store when |
| 17433 | // it's legal. In Thumb2 mode, offset must be an immediate. |
| 17434 | if (Ptr == Offset && Op->getOpcode() == ISD::ADD && |
| 17435 | !Subtarget->isThumb2()) |
| 17436 | std::swap(Base, Offset); |
| 17437 | |
| 17438 | // Post-indexed load / store update the base pointer. |
| 17439 | if (Ptr != Base) |
| 17440 | return false; |
| 17441 | } |
| 17442 | |
| 17443 | AM = isInc ? ISD::POST_INC : ISD::POST_DEC; |
| 17444 | return true; |
| 17445 | } |
| 17446 | |
| 17447 | void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, |
| 17448 | KnownBits &Known, |
| 17449 | const APInt &DemandedElts, |
| 17450 | const SelectionDAG &DAG, |
| 17451 | unsigned Depth) const { |
| 17452 | unsigned BitWidth = Known.getBitWidth(); |
| 17453 | Known.resetAll(); |
| 17454 | switch (Op.getOpcode()) { |
| 17455 | default: break; |
| 17456 | case ARMISD::ADDC: |
| 17457 | case ARMISD::ADDE: |
| 17458 | case ARMISD::SUBC: |
| 17459 | case ARMISD::SUBE: |
| 17460 | // Special cases when we convert a carry to a boolean. |
| 17461 | if (Op.getResNo() == 0) { |
| 17462 | SDValue LHS = Op.getOperand(0); |
| 17463 | SDValue RHS = Op.getOperand(1); |
| 17464 | // (ADDE 0, 0, C) will give us a single bit. |
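      // The sum is 0 + 0 + carry, i.e. either 0 or 1, so every bit except
      // the LSB is known to be zero.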
| 17465 | if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) && |
| 17466 | isNullConstant(RHS)) { |
| 17467 | Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); |
| 17468 | return; |
| 17469 | } |
| 17470 | } |
| 17471 | break; |
| 17472 | case ARMISD::CMOV: { |
| 17473 | // Bits are known zero/one if known on the LHS and RHS. |
| 17474 | Known = DAG.computeKnownBits(Op.getOperand(0), Depth+1); |
| 17475 | if (Known.isUnknown()) |
| 17476 | return; |
| 17477 | |
| 17478 | KnownBits KnownRHS = DAG.computeKnownBits(Op.getOperand(1), Depth+1); |
| 17479 | Known = KnownBits::commonBits(Known, KnownRHS); |
| 17480 | return; |
| 17481 | } |
| 17482 | case ISD::INTRINSIC_W_CHAIN: { |
| 17483 | ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1)); |
| 17484 | Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue()); |
| 17485 | switch (IntID) { |
| 17486 | default: return; |
| 17487 | case Intrinsic::arm_ldaex: |
| 17488 | case Intrinsic::arm_ldrex: { |
| 17489 | EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT(); |
| 17490 | unsigned MemBits = VT.getScalarSizeInBits(); |
| 17491 | Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); |
| 17492 | return; |
| 17493 | } |
| 17494 | } |
| 17495 | } |
| 17496 | case ARMISD::BFI: { |
| 17497 | // Conservatively, we can recurse down the first operand |
| 17498 | // and just mask out all affected bits. |
| 17499 | Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); |
| 17500 | |
| 17501 | // The operand to BFI is already a mask suitable for removing the bits it |
| 17502 | // sets. |
| 17503 | ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2)); |
| 17504 | const APInt &Mask = CI->getAPIntValue(); |
| 17505 | Known.Zero &= Mask; |
| 17506 | Known.One &= Mask; |
| 17507 | return; |
| 17508 | } |
| 17509 | case ARMISD::VGETLANEs: |
| 17510 | case ARMISD::VGETLANEu: { |
| 17511 | const SDValue &SrcSV = Op.getOperand(0); |
| 17512 | EVT VecVT = SrcSV.getValueType(); |
    assert(VecVT.isVector() && "VGETLANE expected a vector type");
| 17514 | const unsigned NumSrcElts = VecVT.getVectorNumElements(); |
| 17515 | ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode()); |
| 17516 | assert(Pos->getAPIntValue().ult(NumSrcElts) && |
| 17517 | "VGETLANE index out of bounds" ); |
| 17518 | unsigned Idx = Pos->getZExtValue(); |
| 17519 | APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx); |
| 17520 | Known = DAG.computeKnownBits(SrcSV, DemandedElt, Depth + 1); |
| 17521 | |
| 17522 | EVT VT = Op.getValueType(); |
| 17523 | const unsigned DstSz = VT.getScalarSizeInBits(); |
| 17524 | const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits(); |
| 17525 | (void)SrcSz; |
| 17526 | assert(SrcSz == Known.getBitWidth()); |
| 17527 | assert(DstSz > SrcSz); |
    if (Op.getOpcode() == ARMISD::VGETLANEs)
      Known = Known.sext(DstSz);
    else
      Known = Known.zext(DstSz);
| 17533 | assert(DstSz == Known.getBitWidth()); |
| 17534 | break; |
| 17535 | } |
| 17536 | case ARMISD::VMOVrh: { |
| 17537 | KnownBits KnownOp = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); |
| 17538 | assert(KnownOp.getBitWidth() == 16); |
| 17539 | Known = KnownOp.zext(32); |
| 17540 | break; |
| 17541 | } |
| 17542 | } |
| 17543 | } |
| 17544 | |
| 17545 | bool ARMTargetLowering::targetShrinkDemandedConstant( |
| 17546 | SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, |
| 17547 | TargetLoweringOpt &TLO) const { |
| 17548 | // Delay optimization, so we don't have to deal with illegal types, or block |
| 17549 | // optimizations. |
| 17550 | if (!TLO.LegalOps) |
| 17551 | return false; |
| 17552 | |
| 17553 | // Only optimize AND for now. |
| 17554 | if (Op.getOpcode() != ISD::AND) |
| 17555 | return false; |
| 17556 | |
| 17557 | EVT VT = Op.getValueType(); |
| 17558 | |
| 17559 | // Ignore vectors. |
| 17560 | if (VT.isVector()) |
| 17561 | return false; |
| 17562 | |
  assert(VT == MVT::i32 && "Unexpected integer type");
| 17564 | |
| 17565 | // Make sure the RHS really is a constant. |
| 17566 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); |
| 17567 | if (!C) |
| 17568 | return false; |
| 17569 | |
| 17570 | unsigned Mask = C->getZExtValue(); |
| 17571 | |
| 17572 | unsigned Demanded = DemandedBits.getZExtValue(); |
| 17573 | unsigned ShrunkMask = Mask & Demanded; |
| 17574 | unsigned ExpandedMask = Mask | ~Demanded; |
| 17575 | |
| 17576 | // If the mask is all zeros, let the target-independent code replace the |
| 17577 | // result with zero. |
| 17578 | if (ShrunkMask == 0) |
| 17579 | return false; |
| 17580 | |
| 17581 | // If the mask is all ones, erase the AND. (Currently, the target-independent |
| 17582 | // code won't do this, so we have to do it explicitly to avoid an infinite |
| 17583 | // loop in obscure cases.) |
| 17584 | if (ExpandedMask == ~0U) |
| 17585 | return TLO.CombineTo(Op, Op.getOperand(0)); |
| 17586 | |
| 17587 | auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool { |
| 17588 | return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0; |
| 17589 | }; |
| 17590 | auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool { |
| 17591 | if (NewMask == Mask) |
| 17592 | return true; |
| 17593 | SDLoc DL(Op); |
| 17594 | SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT); |
| 17595 | SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC); |
| 17596 | return TLO.CombineTo(Op, NewOp); |
| 17597 | }; |
| 17598 | |
| 17599 | // Prefer uxtb mask. |
| 17600 | if (IsLegalMask(0xFF)) |
| 17601 | return UseMask(0xFF); |
| 17602 | |
| 17603 | // Prefer uxth mask. |
| 17604 | if (IsLegalMask(0xFFFF)) |
| 17605 | return UseMask(0xFFFF); |
| 17606 | |
| 17607 | // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2. |
| 17608 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. |
| 17609 | if (ShrunkMask < 256) |
| 17610 | return UseMask(ShrunkMask); |
| 17611 | |
| 17612 | // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2. |
| 17613 | // FIXME: Prefer a contiguous sequence of bits for other optimizations. |
| 17614 | if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256) |
| 17615 | return UseMask(ExpandedMask); |
| 17616 | |
| 17617 | // Potential improvements: |
| 17618 | // |
| 17619 | // We could try to recognize lsls+lsrs or lsrs+lsls pairs here. |
| 17620 | // We could try to prefer Thumb1 immediates which can be lowered to a |
| 17621 | // two-instruction sequence. |
| 17622 | // We could try to recognize more legal ARM/Thumb2 immediates here. |
| 17623 | |
| 17624 | return false; |
| 17625 | } |
| 17626 | |
| 17627 | bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode( |
| 17628 | SDValue Op, const APInt &OriginalDemandedBits, |
| 17629 | const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, |
| 17630 | unsigned Depth) const { |
| 17631 | unsigned Opc = Op.getOpcode(); |
| 17632 | |
| 17633 | switch (Opc) { |
| 17634 | case ARMISD::ASRL: |
| 17635 | case ARMISD::LSRL: { |
| 17636 | // If this is result 0 and the other result is unused, see if the demand |
| 17637 | // bits allow us to shrink this long shift into a standard small shift in |
| 17638 | // the opposite direction. |
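    // For example, (lsrl Lo, Hi, #8) produces a low word whose top 8 bits are
    // the bottom 8 bits of Hi; if only those bits are demanded, the same
    // value is available more cheaply as (shl Hi, #24).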
| 17639 | if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(1) && |
| 17640 | isa<ConstantSDNode>(Op->getOperand(2))) { |
| 17641 | unsigned ShAmt = Op->getConstantOperandVal(2); |
| 17642 | if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf( |
| 17643 | APInt::getAllOnesValue(32) << (32 - ShAmt))) |
| 17644 | return TLO.CombineTo( |
| 17645 | Op, TLO.DAG.getNode( |
| 17646 | ISD::SHL, SDLoc(Op), MVT::i32, Op.getOperand(1), |
| 17647 | TLO.DAG.getConstant(32 - ShAmt, SDLoc(Op), MVT::i32))); |
| 17648 | } |
| 17649 | break; |
| 17650 | } |
| 17651 | } |
| 17652 | |
| 17653 | return TargetLowering::SimplifyDemandedBitsForTargetNode( |
| 17654 | Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth); |
| 17655 | } |
| 17656 | |
| 17657 | //===----------------------------------------------------------------------===// |
| 17658 | // ARM Inline Assembly Support |
| 17659 | //===----------------------------------------------------------------------===// |
| 17660 | |
| 17661 | bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { |
| 17662 | // Looking for "rev" which is V6+. |
| 17663 | if (!Subtarget->hasV6Ops()) |
| 17664 | return false; |
| 17665 | |
| 17666 | InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand()); |
| 17667 | std::string AsmStr = IA->getAsmString(); |
| 17668 | SmallVector<StringRef, 4> AsmPieces; |
| 17669 | SplitString(AsmStr, AsmPieces, ";\n" ); |
| 17670 | |
| 17671 | switch (AsmPieces.size()) { |
| 17672 | default: return false; |
| 17673 | case 1: |
| 17674 | AsmStr = std::string(AsmPieces[0]); |
| 17675 | AsmPieces.clear(); |
| 17676 | SplitString(AsmStr, AsmPieces, " \t," ); |
| 17677 | |
| 17678 | // rev $0, $1 |
| 17679 | if (AsmPieces.size() == 3 && |
| 17680 | AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && |
        IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
| 17682 | IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); |
| 17683 | if (Ty && Ty->getBitWidth() == 32) |
| 17684 | return IntrinsicLowering::LowerToByteSwap(CI); |
| 17685 | } |
| 17686 | break; |
| 17687 | } |
| 17688 | |
| 17689 | return false; |
| 17690 | } |
| 17691 | |
| 17692 | const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const { |
| 17693 | // At this point, we have to lower this constraint to something else, so we |
| 17694 | // lower it to an "r" or "w". However, by doing this we will force the result |
| 17695 | // to be in register, while the X constraint is much more permissive. |
| 17696 | // |
| 17697 | // Although we are correct (we are free to emit anything, without |
| 17698 | // constraints), we might break use cases that would expect us to be more |
| 17699 | // efficient and emit something else. |
| 17700 | if (!Subtarget->hasVFP2Base()) |
| 17701 | return "r" ; |
| 17702 | if (ConstraintVT.isFloatingPoint()) |
| 17703 | return "w" ; |
| 17704 | if (ConstraintVT.isVector() && Subtarget->hasNEON() && |
| 17705 | (ConstraintVT.getSizeInBits() == 64 || |
| 17706 | ConstraintVT.getSizeInBits() == 128)) |
| 17707 | return "w" ; |
| 17708 | |
| 17709 | return "r" ; |
| 17710 | } |
| 17711 | |
| 17712 | /// getConstraintType - Given a constraint letter, return the type of |
| 17713 | /// constraint it is for this target. |
| 17714 | ARMTargetLowering::ConstraintType |
| 17715 | ARMTargetLowering::getConstraintType(StringRef Constraint) const { |
| 17716 | unsigned S = Constraint.size(); |
| 17717 | if (S == 1) { |
| 17718 | switch (Constraint[0]) { |
| 17719 | default: break; |
| 17720 | case 'l': return C_RegisterClass; |
| 17721 | case 'w': return C_RegisterClass; |
| 17722 | case 'h': return C_RegisterClass; |
| 17723 | case 'x': return C_RegisterClass; |
| 17724 | case 't': return C_RegisterClass; |
| 17725 | case 'j': return C_Immediate; // Constant for movw. |
    // An address with a single base register. Due to the way we currently
    // handle addresses, it is the same as an 'r' memory constraint.
| 17728 | case 'Q': return C_Memory; |
| 17729 | } |
| 17730 | } else if (S == 2) { |
| 17731 | switch (Constraint[0]) { |
| 17732 | default: break; |
| 17733 | case 'T': return C_RegisterClass; |
| 17734 | // All 'U+' constraints are addresses. |
| 17735 | case 'U': return C_Memory; |
| 17736 | } |
| 17737 | } |
| 17738 | return TargetLowering::getConstraintType(Constraint); |
| 17739 | } |
| 17740 | |
| 17741 | /// Examine constraint type and operand type and determine a weight value. |
| 17742 | /// This object must already have been set up with the operand type |
| 17743 | /// and the current alternative constraint selected. |
| 17744 | TargetLowering::ConstraintWeight |
| 17745 | ARMTargetLowering::getSingleConstraintMatchWeight( |
| 17746 | AsmOperandInfo &info, const char *constraint) const { |
| 17747 | ConstraintWeight weight = CW_Invalid; |
| 17748 | Value *CallOperandVal = info.CallOperandVal; |
| 17749 | // If we don't have a value, we can't do a match, |
| 17750 | // but allow it at the lowest weight. |
| 17751 | if (!CallOperandVal) |
| 17752 | return CW_Default; |
| 17753 | Type *type = CallOperandVal->getType(); |
| 17754 | // Look at the constraint type. |
| 17755 | switch (*constraint) { |
| 17756 | default: |
| 17757 | weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); |
| 17758 | break; |
| 17759 | case 'l': |
| 17760 | if (type->isIntegerTy()) { |
| 17761 | if (Subtarget->isThumb()) |
| 17762 | weight = CW_SpecificReg; |
| 17763 | else |
| 17764 | weight = CW_Register; |
| 17765 | } |
| 17766 | break; |
| 17767 | case 'w': |
| 17768 | if (type->isFloatingPointTy()) |
| 17769 | weight = CW_Register; |
| 17770 | break; |
| 17771 | } |
| 17772 | return weight; |
| 17773 | } |
| 17774 | |
| 17775 | using RCPair = std::pair<unsigned, const TargetRegisterClass *>; |
| 17776 | |
| 17777 | RCPair ARMTargetLowering::getRegForInlineAsmConstraint( |
| 17778 | const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { |
| 17779 | switch (Constraint.size()) { |
| 17780 | case 1: |
| 17781 | // GCC ARM Constraint Letters |
| 17782 | switch (Constraint[0]) { |
| 17783 | case 'l': // Low regs or general regs. |
| 17784 | if (Subtarget->isThumb()) |
| 17785 | return RCPair(0U, &ARM::tGPRRegClass); |
| 17786 | return RCPair(0U, &ARM::GPRRegClass); |
| 17787 | case 'h': // High regs or no regs. |
| 17788 | if (Subtarget->isThumb()) |
| 17789 | return RCPair(0U, &ARM::hGPRRegClass); |
| 17790 | break; |
| 17791 | case 'r': |
| 17792 | if (Subtarget->isThumb1Only()) |
| 17793 | return RCPair(0U, &ARM::tGPRRegClass); |
| 17794 | return RCPair(0U, &ARM::GPRRegClass); |
| 17795 | case 'w': |
| 17796 | if (VT == MVT::Other) |
| 17797 | break; |
| 17798 | if (VT == MVT::f32) |
| 17799 | return RCPair(0U, &ARM::SPRRegClass); |
| 17800 | if (VT.getSizeInBits() == 64) |
| 17801 | return RCPair(0U, &ARM::DPRRegClass); |
| 17802 | if (VT.getSizeInBits() == 128) |
| 17803 | return RCPair(0U, &ARM::QPRRegClass); |
| 17804 | break; |
| 17805 | case 'x': |
| 17806 | if (VT == MVT::Other) |
| 17807 | break; |
| 17808 | if (VT == MVT::f32) |
| 17809 | return RCPair(0U, &ARM::SPR_8RegClass); |
| 17810 | if (VT.getSizeInBits() == 64) |
| 17811 | return RCPair(0U, &ARM::DPR_8RegClass); |
| 17812 | if (VT.getSizeInBits() == 128) |
| 17813 | return RCPair(0U, &ARM::QPR_8RegClass); |
| 17814 | break; |
| 17815 | case 't': |
| 17816 | if (VT == MVT::Other) |
| 17817 | break; |
| 17818 | if (VT == MVT::f32 || VT == MVT::i32) |
| 17819 | return RCPair(0U, &ARM::SPRRegClass); |
| 17820 | if (VT.getSizeInBits() == 64) |
| 17821 | return RCPair(0U, &ARM::DPR_VFP2RegClass); |
| 17822 | if (VT.getSizeInBits() == 128) |
| 17823 | return RCPair(0U, &ARM::QPR_VFP2RegClass); |
| 17824 | break; |
| 17825 | } |
| 17826 | break; |
| 17827 | |
| 17828 | case 2: |
| 17829 | if (Constraint[0] == 'T') { |
| 17830 | switch (Constraint[1]) { |
| 17831 | default: |
| 17832 | break; |
| 17833 | case 'e': |
| 17834 | return RCPair(0U, &ARM::tGPREvenRegClass); |
| 17835 | case 'o': |
| 17836 | return RCPair(0U, &ARM::tGPROddRegClass); |
| 17837 | } |
| 17838 | } |
| 17839 | break; |
| 17840 | |
| 17841 | default: |
| 17842 | break; |
| 17843 | } |
| 17844 | |
| 17845 | if (StringRef("{cc}" ).equals_lower(Constraint)) |
| 17846 | return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass); |
| 17847 | |
| 17848 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
| 17849 | } |
| 17850 | |
| 17851 | /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops |
| 17852 | /// vector. If it is invalid, don't add anything to Ops. |
| 17853 | void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, |
| 17854 | std::string &Constraint, |
| 17855 | std::vector<SDValue>&Ops, |
| 17856 | SelectionDAG &DAG) const { |
| 17857 | SDValue Result; |
| 17858 | |
| 17859 | // Currently only support length 1 constraints. |
| 17860 | if (Constraint.length() != 1) return; |
| 17861 | |
| 17862 | char ConstraintLetter = Constraint[0]; |
| 17863 | switch (ConstraintLetter) { |
| 17864 | default: break; |
| 17865 | case 'j': |
| 17866 | case 'I': case 'J': case 'K': case 'L': |
| 17867 | case 'M': case 'N': case 'O': |
| 17868 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); |
| 17869 | if (!C) |
| 17870 | return; |
| 17871 | |
| 17872 | int64_t CVal64 = C->getSExtValue(); |
| 17873 | int CVal = (int) CVal64; |
| 17874 | // None of these constraints allow values larger than 32 bits. Check |
| 17875 | // that the value fits in an int. |
| 17876 | if (CVal != CVal64) |
| 17877 | return; |
| 17878 | |
| 17879 | switch (ConstraintLetter) { |
| 17880 | case 'j': |
| 17881 | // Constant suitable for movw, must be between 0 and |
| 17882 | // 65535. |
      if (Subtarget->hasV6T2Ops() || Subtarget->hasV8MBaselineOps())
| 17884 | if (CVal >= 0 && CVal <= 65535) |
| 17885 | break; |
| 17886 | return; |
| 17887 | case 'I': |
| 17888 | if (Subtarget->isThumb1Only()) { |
| 17889 | // This must be a constant between 0 and 255, for ADD |
| 17890 | // immediates. |
| 17891 | if (CVal >= 0 && CVal <= 255) |
| 17892 | break; |
| 17893 | } else if (Subtarget->isThumb2()) { |
| 17894 | // A constant that can be used as an immediate value in a |
| 17895 | // data-processing instruction. |
| 17896 | if (ARM_AM::getT2SOImmVal(CVal) != -1) |
| 17897 | break; |
| 17898 | } else { |
| 17899 | // A constant that can be used as an immediate value in a |
| 17900 | // data-processing instruction. |
| 17901 | if (ARM_AM::getSOImmVal(CVal) != -1) |
| 17902 | break; |
| 17903 | } |
| 17904 | return; |
| 17905 | |
| 17906 | case 'J': |
| 17907 | if (Subtarget->isThumb1Only()) { |
| 17908 | // This must be a constant between -255 and -1, for negated ADD |
| 17909 | // immediates. This can be used in GCC with an "n" modifier that |
| 17910 | // prints the negated value, for use with SUB instructions. It is |
| 17911 | // not useful otherwise but is implemented for compatibility. |
| 17912 | if (CVal >= -255 && CVal <= -1) |
| 17913 | break; |
| 17914 | } else { |
| 17915 | // This must be a constant between -4095 and 4095. It is not clear |
| 17916 | // what this constraint is intended for. Implemented for |
| 17917 | // compatibility with GCC. |
| 17918 | if (CVal >= -4095 && CVal <= 4095) |
| 17919 | break; |
| 17920 | } |
| 17921 | return; |
| 17922 | |
| 17923 | case 'K': |
| 17924 | if (Subtarget->isThumb1Only()) { |
| 17925 | // A 32-bit value where only one byte has a nonzero value. Exclude |
| 17926 | // zero to match GCC. This constraint is used by GCC internally for |
| 17927 | // constants that can be loaded with a move/shift combination. |
| 17928 | // It is not useful otherwise but is implemented for compatibility. |
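        // For example, 0xFF00 qualifies and can be materialized as
        // "movs rN, #0xFF; lsls rN, rN, #8".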
| 17929 | if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) |
| 17930 | break; |
| 17931 | } else if (Subtarget->isThumb2()) { |
| 17932 | // A constant whose bitwise inverse can be used as an immediate |
| 17933 | // value in a data-processing instruction. This can be used in GCC |
| 17934 | // with a "B" modifier that prints the inverted value, for use with |
| 17935 | // BIC and MVN instructions. It is not useful otherwise but is |
| 17936 | // implemented for compatibility. |
| 17937 | if (ARM_AM::getT2SOImmVal(~CVal) != -1) |
| 17938 | break; |
| 17939 | } else { |
| 17940 | // A constant whose bitwise inverse can be used as an immediate |
| 17941 | // value in a data-processing instruction. This can be used in GCC |
| 17942 | // with a "B" modifier that prints the inverted value, for use with |
| 17943 | // BIC and MVN instructions. It is not useful otherwise but is |
| 17944 | // implemented for compatibility. |
| 17945 | if (ARM_AM::getSOImmVal(~CVal) != -1) |
| 17946 | break; |
| 17947 | } |
| 17948 | return; |
| 17949 | |
| 17950 | case 'L': |
| 17951 | if (Subtarget->isThumb1Only()) { |
| 17952 | // This must be a constant between -7 and 7, |
| 17953 | // for 3-operand ADD/SUB immediate instructions. |
| 17954 | if (CVal >= -7 && CVal < 7) |
| 17955 | break; |
| 17956 | } else if (Subtarget->isThumb2()) { |
| 17957 | // A constant whose negation can be used as an immediate value in a |
| 17958 | // data-processing instruction. This can be used in GCC with an "n" |
| 17959 | // modifier that prints the negated value, for use with SUB |
| 17960 | // instructions. It is not useful otherwise but is implemented for |
| 17961 | // compatibility. |
| 17962 | if (ARM_AM::getT2SOImmVal(-CVal) != -1) |
| 17963 | break; |
| 17964 | } else { |
| 17965 | // A constant whose negation can be used as an immediate value in a |
| 17966 | // data-processing instruction. This can be used in GCC with an "n" |
| 17967 | // modifier that prints the negated value, for use with SUB |
| 17968 | // instructions. It is not useful otherwise but is implemented for |
| 17969 | // compatibility. |
| 17970 | if (ARM_AM::getSOImmVal(-CVal) != -1) |
| 17971 | break; |
| 17972 | } |
| 17973 | return; |
| 17974 | |
| 17975 | case 'M': |
| 17976 | if (Subtarget->isThumb1Only()) { |
| 17977 | // This must be a multiple of 4 between 0 and 1020, for |
| 17978 | // ADD sp + immediate. |
| 17979 | if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) |
| 17980 | break; |
| 17981 | } else { |
| 17982 | // A power of two or a constant between 0 and 32. This is used in |
| 17983 | // GCC for the shift amount on shifted register operands, but it is |
| 17984 | // useful in general for any shift amounts. |
| 17985 | if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) |
| 17986 | break; |
| 17987 | } |
| 17988 | return; |
| 17989 | |
| 17990 | case 'N': |
| 17991 | if (Subtarget->isThumb1Only()) { |
| 17992 | // This must be a constant between 0 and 31, for shift amounts. |
| 17993 | if (CVal >= 0 && CVal <= 31) |
| 17994 | break; |
| 17995 | } |
| 17996 | return; |
| 17997 | |
| 17998 | case 'O': |
| 17999 | if (Subtarget->isThumb1Only()) { |
| 18000 | // This must be a multiple of 4 between -508 and 508, for |
| 18001 | // ADD/SUB sp = sp + immediate. |
| 18002 | if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) |
| 18003 | break; |
| 18004 | } |
| 18005 | return; |
| 18006 | } |
| 18007 | Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType()); |
| 18008 | break; |
| 18009 | } |
| 18010 | |
| 18011 | if (Result.getNode()) { |
| 18012 | Ops.push_back(Result); |
| 18013 | return; |
| 18014 | } |
| 18015 | return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); |
| 18016 | } |
| 18017 | |
| 18018 | static RTLIB::Libcall getDivRemLibcall( |
| 18019 | const SDNode *N, MVT::SimpleValueType SVT) { |
| 18020 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || |
| 18021 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && |
| 18022 | "Unhandled Opcode in getDivRemLibcall" ); |
| 18023 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
| 18024 | N->getOpcode() == ISD::SREM; |
| 18025 | RTLIB::Libcall LC; |
| 18026 | switch (SVT) { |
  default: llvm_unreachable("Unexpected request for libcall!");
| 18028 | case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; |
| 18029 | case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; |
| 18030 | case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; |
| 18031 | case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; |
| 18032 | } |
| 18033 | return LC; |
| 18034 | } |
| 18035 | |
| 18036 | static TargetLowering::ArgListTy getDivRemArgList( |
| 18037 | const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) { |
| 18038 | assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || |
| 18039 | N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && |
| 18040 | "Unhandled Opcode in getDivRemArgList" ); |
| 18041 | bool isSigned = N->getOpcode() == ISD::SDIVREM || |
| 18042 | N->getOpcode() == ISD::SREM; |
| 18043 | TargetLowering::ArgListTy Args; |
| 18044 | TargetLowering::ArgListEntry Entry; |
| 18045 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
| 18046 | EVT ArgVT = N->getOperand(i).getValueType(); |
| 18047 | Type *ArgTy = ArgVT.getTypeForEVT(*Context); |
| 18048 | Entry.Node = N->getOperand(i); |
| 18049 | Entry.Ty = ArgTy; |
| 18050 | Entry.IsSExt = isSigned; |
| 18051 | Entry.IsZExt = !isSigned; |
| 18052 | Args.push_back(Entry); |
| 18053 | } |
| 18054 | if (Subtarget->isTargetWindows() && Args.size() >= 2) |
| 18055 | std::swap(Args[0], Args[1]); |
| 18056 | return Args; |
| 18057 | } |
| 18058 | |
| 18059 | SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { |
| 18060 | assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || |
| 18061 | Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || |
| 18062 | Subtarget->isTargetWindows()) && |
| 18063 | "Register-based DivRem lowering only" ); |
| 18064 | unsigned Opcode = Op->getOpcode(); |
| 18065 | assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && |
| 18066 | "Invalid opcode for Div/Rem lowering" ); |
| 18067 | bool isSigned = (Opcode == ISD::SDIVREM); |
| 18068 | EVT VT = Op->getValueType(0); |
| 18069 | Type *Ty = VT.getTypeForEVT(*DAG.getContext()); |
| 18070 | SDLoc dl(Op); |
| 18071 | |
| 18072 | // If the target has hardware divide, use divide + multiply + subtract: |
| 18073 | // div = a / b |
| 18074 | // rem = a - b * div |
| 18075 | // return {div, rem} |
| 18076 | // This should be lowered into UDIV/SDIV + MLS later on. |
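  // E.g. for a = 27, b = 4: div = 27 / 4 = 6 and rem = 27 - 4 * 6 = 3.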
| 18077 | bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() |
| 18078 | : Subtarget->hasDivideInARMMode(); |
| 18079 | if (hasDivide && Op->getValueType(0).isSimple() && |
| 18080 | Op->getSimpleValueType(0) == MVT::i32) { |
| 18081 | unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV; |
| 18082 | const SDValue Dividend = Op->getOperand(0); |
| 18083 | const SDValue Divisor = Op->getOperand(1); |
| 18084 | SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor); |
| 18085 | SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor); |
| 18086 | SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul); |
| 18087 | |
| 18088 | SDValue Values[2] = {Div, Rem}; |
| 18089 | return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values); |
| 18090 | } |
| 18091 | |
| 18092 | RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(), |
| 18093 | VT.getSimpleVT().SimpleTy); |
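  // On AEABI targets the i32 variants resolve to __aeabi_idivmod and
  // __aeabi_uidivmod, which return the quotient in r0 and the remainder
  // in r1.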
| 18094 | SDValue InChain = DAG.getEntryNode(); |
| 18095 | |
| 18096 | TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(), |
| 18097 | DAG.getContext(), |
| 18098 | Subtarget); |
| 18099 | |
| 18100 | SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), |
| 18101 | getPointerTy(DAG.getDataLayout())); |
| 18102 | |
| 18103 | Type *RetTy = StructType::get(Ty, Ty); |
| 18104 | |
| 18105 | if (Subtarget->isTargetWindows()) |
| 18106 | InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain); |
| 18107 | |
| 18108 | TargetLowering::CallLoweringInfo CLI(DAG); |
| 18109 | CLI.setDebugLoc(dl).setChain(InChain) |
| 18110 | .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args)) |
| 18111 | .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned); |
| 18112 | |
| 18113 | std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); |
| 18114 | return CallInfo.first; |
| 18115 | } |
| 18116 | |
// Lowers REM using divmod helpers; see RTABI sections 4.2 and 4.3.
| 18119 | SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const { |
| 18120 | // Build return types (div and rem) |
| 18121 | std::vector<Type*> RetTyParams; |
| 18122 | Type *RetTyElement; |
| 18123 | |
| 18124 | switch (N->getValueType(0).getSimpleVT().SimpleTy) { |
    default: llvm_unreachable("Unexpected request for libcall!");
| 18126 | case MVT::i8: RetTyElement = Type::getInt8Ty(*DAG.getContext()); break; |
| 18127 | case MVT::i16: RetTyElement = Type::getInt16Ty(*DAG.getContext()); break; |
| 18128 | case MVT::i32: RetTyElement = Type::getInt32Ty(*DAG.getContext()); break; |
| 18129 | case MVT::i64: RetTyElement = Type::getInt64Ty(*DAG.getContext()); break; |
| 18130 | } |
| 18131 | |
| 18132 | RetTyParams.push_back(RetTyElement); |
| 18133 | RetTyParams.push_back(RetTyElement); |
| 18134 | ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams); |
| 18135 | Type *RetTy = StructType::get(*DAG.getContext(), ret); |
| 18136 | |
| 18137 | RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT(). |
| 18138 | SimpleTy); |
| 18139 | SDValue InChain = DAG.getEntryNode(); |
| 18140 | TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(), |
| 18141 | Subtarget); |
| 18142 | bool isSigned = N->getOpcode() == ISD::SREM; |
| 18143 | SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), |
| 18144 | getPointerTy(DAG.getDataLayout())); |
| 18145 | |
| 18146 | if (Subtarget->isTargetWindows()) |
| 18147 | InChain = WinDBZCheckDenominator(DAG, N, InChain); |
| 18148 | |
| 18149 | // Lower call |
| 18150 | CallLoweringInfo CLI(DAG); |
| 18151 | CLI.setChain(InChain) |
| 18152 | .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args)) |
| 18153 | .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N)); |
| 18154 | std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); |
| 18155 | |
| 18156 | // Return second (rem) result operand (first contains div) |
| 18157 | SDNode *ResNode = CallResult.first.getNode(); |
  assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
| 18159 | return ResNode->getOperand(1); |
| 18160 | } |
| 18161 | |
| 18162 | SDValue |
| 18163 | ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { |
  assert(Subtarget->isTargetWindows() && "unsupported target platform");
| 18165 | SDLoc DL(Op); |
| 18166 | |
| 18167 | // Get the inputs. |
| 18168 | SDValue Chain = Op.getOperand(0); |
| 18169 | SDValue Size = Op.getOperand(1); |
| 18170 | |
| 18171 | if (DAG.getMachineFunction().getFunction().hasFnAttribute( |
| 18172 | "no-stack-arg-probe" )) { |
| 18173 | MaybeAlign Align = |
| 18174 | cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue(); |
| 18175 | SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32); |
| 18176 | Chain = SP.getValue(1); |
| 18177 | SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size); |
| 18178 | if (Align) |
| 18179 | SP = |
| 18180 | DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0), |
| 18181 | DAG.getConstant(-(uint64_t)Align->value(), DL, MVT::i32)); |
| 18182 | Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP); |
| 18183 | SDValue Ops[2] = { SP, Chain }; |
| 18184 | return DAG.getMergeValues(Ops, DL); |
| 18185 | } |
| 18186 | |
| 18187 | SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size, |
| 18188 | DAG.getConstant(2, DL, MVT::i32)); |
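  // __chkstk consumes the allocation size as a count of 32-bit words in r4,
  // hence the shift right by 2 before the copy into R4 below.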
| 18189 | |
| 18190 | SDValue Flag; |
| 18191 | Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag); |
| 18192 | Flag = Chain.getValue(1); |
| 18193 | |
| 18194 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
| 18195 | Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag); |
| 18196 | |
| 18197 | SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32); |
| 18198 | Chain = NewSP.getValue(1); |
| 18199 | |
| 18200 | SDValue Ops[2] = { NewSP, Chain }; |
| 18201 | return DAG.getMergeValues(Ops, DL); |
| 18202 | } |
| 18203 | |
| 18204 | SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { |
| 18205 | bool IsStrict = Op->isStrictFPOpcode(); |
| 18206 | SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); |
| 18207 | const unsigned DstSz = Op.getValueType().getSizeInBits(); |
| 18208 | const unsigned SrcSz = SrcVal.getValueType().getSizeInBits(); |
  assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 &&
         "Unexpected type for custom-lowering FP_EXTEND");

  assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
         "With both FP DP and 16, any FP conversion is legal!");

  assert(!(DstSz == 32 && Subtarget->hasFP16()) &&
         "With FP16, 16 to 32 conversion is legal!");
| 18217 | |
| 18218 | // Converting from 32 -> 64 is valid if we have FP64. |
| 18219 | if (SrcSz == 32 && DstSz == 64 && Subtarget->hasFP64()) { |
| 18220 | // FIXME: Remove this when we have strict fp instruction selection patterns |
| 18221 | if (IsStrict) { |
| 18222 | SDLoc Loc(Op); |
| 18223 | SDValue Result = DAG.getNode(ISD::FP_EXTEND, |
| 18224 | Loc, Op.getValueType(), SrcVal); |
| 18225 | return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc); |
| 18226 | } |
| 18227 | return Op; |
| 18228 | } |
| 18229 | |
  // Either we are converting from 16 -> 64 without FP16 and/or FP
  // double-precision, or without Armv8-FP, so we must do it in two steps.
  // Or we are converting from 32 -> 64 without FP double-precision, or from
  // 16 -> 32 without FP16, so we must make a libcall.
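  // For example, extending f16 to f64 with FP16 but without FP64 first emits
  // a vcvt for the 16 -> 32 step and then a libcall (typically __aeabi_f2d on
  // AEABI targets) for 32 -> 64.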
| 18235 | SDLoc Loc(Op); |
| 18236 | RTLIB::Libcall LC; |
| 18237 | MakeLibCallOptions CallOptions; |
| 18238 | SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue(); |
| 18239 | for (unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) { |
| 18240 | bool Supported = (Sz == 16 ? Subtarget->hasFP16() : Subtarget->hasFP64()); |
| 18241 | MVT SrcVT = (Sz == 16 ? MVT::f16 : MVT::f32); |
| 18242 | MVT DstVT = (Sz == 16 ? MVT::f32 : MVT::f64); |
| 18243 | if (Supported) { |
| 18244 | if (IsStrict) { |
| 18245 | SrcVal = DAG.getNode(ISD::STRICT_FP_EXTEND, Loc, |
| 18246 | {DstVT, MVT::Other}, {Chain, SrcVal}); |
| 18247 | Chain = SrcVal.getValue(1); |
| 18248 | } else { |
| 18249 | SrcVal = DAG.getNode(ISD::FP_EXTEND, Loc, DstVT, SrcVal); |
| 18250 | } |
| 18251 | } else { |
| 18252 | LC = RTLIB::getFPEXT(SrcVT, DstVT); |
| 18253 | assert(LC != RTLIB::UNKNOWN_LIBCALL && |
| 18254 | "Unexpected type for custom-lowering FP_EXTEND" ); |
| 18255 | std::tie(SrcVal, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions, |
| 18256 | Loc, Chain); |
| 18257 | } |
| 18258 | } |
| 18259 | |
| 18260 | return IsStrict ? DAG.getMergeValues({SrcVal, Chain}, Loc) : SrcVal; |
| 18261 | } |
| 18262 | |
| 18263 | SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { |
| 18264 | bool IsStrict = Op->isStrictFPOpcode(); |
| 18265 | |
| 18266 | SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); |
| 18267 | EVT SrcVT = SrcVal.getValueType(); |
| 18268 | EVT DstVT = Op.getValueType(); |
| 18269 | const unsigned DstSz = Op.getValueType().getSizeInBits(); |
| 18270 | const unsigned SrcSz = SrcVT.getSizeInBits(); |
| 18271 | (void)DstSz; |
| 18272 | assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 && |
| 18273 | "Unexpected type for custom-lowering FP_ROUND" ); |
| 18274 | |
| 18275 | assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) && |
| 18276 | "With both FP DP and 16, any FP conversion is legal!" ); |
| 18277 | |
| 18278 | SDLoc Loc(Op); |
| 18279 | |
  // A single instruction handles the 32 -> 16 conversion if we have FP16.
| 18281 | if (SrcSz == 32 && Subtarget->hasFP16()) |
| 18282 | return Op; |
| 18283 | |
| 18284 | // Lib call from 32 -> 16 / 64 -> [32, 16] |
| 18285 | RTLIB::Libcall LC = RTLIB::getFPROUND(SrcVT, DstVT); |
| 18286 | assert(LC != RTLIB::UNKNOWN_LIBCALL && |
| 18287 | "Unexpected type for custom-lowering FP_ROUND" ); |
| 18288 | MakeLibCallOptions CallOptions; |
| 18289 | SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue(); |
| 18290 | SDValue Result; |
| 18291 | std::tie(Result, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions, |
| 18292 | Loc, Chain); |
| 18293 | return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result; |
| 18294 | } |
| 18295 | |
| 18296 | void ARMTargetLowering::lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results, |
| 18297 | SelectionDAG &DAG) const { |
  assert(N->getValueType(0) == MVT::i64 && "Unexpected type (!= i64) on ABS.");
| 18299 | MVT HalfT = MVT::i32; |
| 18300 | SDLoc dl(N); |
| 18301 | SDValue Hi, Lo, Tmp; |
| 18302 | |
| 18303 | if (!isOperationLegalOrCustom(ISD::ADDCARRY, HalfT) || |
| 18304 | !isOperationLegalOrCustom(ISD::UADDO, HalfT)) |
    return;
| 18306 | |
| 18307 | unsigned OpTypeBits = HalfT.getScalarSizeInBits(); |
| 18308 | SDVTList VTList = DAG.getVTList(HalfT, MVT::i1); |
| 18309 | |
| 18310 | Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0), |
| 18311 | DAG.getConstant(0, dl, HalfT)); |
| 18312 | Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0), |
| 18313 | DAG.getConstant(1, dl, HalfT)); |
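  // This computes abs(x) as (x + s) ^ s, where s is the sign of x replicated
  // across the word (x >> 63, realized below as Hi >> 31 plus a carry chain).
  // E.g. for x = -5: s = -1, x + s = -6, and -6 ^ -1 = 5.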
| 18314 | |
| 18315 | Tmp = DAG.getNode(ISD::SRA, dl, HalfT, Hi, |
| 18316 | DAG.getConstant(OpTypeBits - 1, dl, |
| 18317 | getShiftAmountTy(HalfT, DAG.getDataLayout()))); |
| 18318 | Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo); |
| 18319 | Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi, |
| 18320 | SDValue(Lo.getNode(), 1)); |
| 18321 | Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi); |
| 18322 | Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo); |
| 18323 | |
| 18324 | Results.push_back(Lo); |
| 18325 | Results.push_back(Hi); |
| 18326 | } |
| 18327 | |
| 18328 | bool |
| 18329 | ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { |
| 18330 | // The ARM target isn't yet aware of offsets. |
| 18331 | return false; |
| 18332 | } |
| 18333 | |
| 18334 | bool ARM::isBitFieldInvertedMask(unsigned v) { |
| 18335 | if (v == 0xffffffff) |
| 18336 | return false; |
| 18337 | |
  // There can be 1's on either or both "outsides"; all the "inside"
  // bits must be 0's.
| 18340 | return isShiftedMask_32(~v); |
| 18341 | } |
| 18342 | |
| 18343 | /// isFPImmLegal - Returns true if the target can instruction select the |
| 18344 | /// specified FP immediate natively. If false, the legalizer will |
| 18345 | /// materialize the FP immediate as a load from a constant pool. |
| 18346 | bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, |
| 18347 | bool ForCodeSize) const { |
| 18348 | if (!Subtarget->hasVFP3Base()) |
| 18349 | return false; |
| 18350 | if (VT == MVT::f16 && Subtarget->hasFullFP16()) |
| 18351 | return ARM_AM::getFP16Imm(Imm) != -1; |
| 18352 | if (VT == MVT::f32 && Subtarget->hasFullFP16() && |
| 18353 | ARM_AM::getFP32FP16Imm(Imm) != -1) |
| 18354 | return true; |
| 18355 | if (VT == MVT::f32) |
| 18356 | return ARM_AM::getFP32Imm(Imm) != -1; |
| 18357 | if (VT == MVT::f64 && Subtarget->hasFP64()) |
| 18358 | return ARM_AM::getFP64Imm(Imm) != -1; |
| 18359 | return false; |
| 18360 | } |
| 18361 | |
| 18362 | /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as |
| 18363 | /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment |
| 18364 | /// specified in the intrinsic calls. |
| 18365 | bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, |
| 18366 | const CallInst &I, |
| 18367 | MachineFunction &MF, |
| 18368 | unsigned Intrinsic) const { |
| 18369 | switch (Intrinsic) { |
| 18370 | case Intrinsic::arm_neon_vld1: |
| 18371 | case Intrinsic::arm_neon_vld2: |
| 18372 | case Intrinsic::arm_neon_vld3: |
| 18373 | case Intrinsic::arm_neon_vld4: |
| 18374 | case Intrinsic::arm_neon_vld2lane: |
| 18375 | case Intrinsic::arm_neon_vld3lane: |
| 18376 | case Intrinsic::arm_neon_vld4lane: |
| 18377 | case Intrinsic::arm_neon_vld2dup: |
| 18378 | case Intrinsic::arm_neon_vld3dup: |
| 18379 | case Intrinsic::arm_neon_vld4dup: { |
| 18380 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 18381 | // Conservatively set memVT to the entire set of vectors loaded. |
| 18382 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
| 18383 | uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; |
| 18384 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); |
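    // E.g. a vld4 returning four v4i32 vectors loads 512 bits in total,
    // recorded conservatively here as v8i64.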
| 18385 | Info.ptrVal = I.getArgOperand(0); |
| 18386 | Info.offset = 0; |
| 18387 | Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); |
| 18388 | Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue(); |
| 18389 | // volatile loads with NEON intrinsics not supported |
| 18390 | Info.flags = MachineMemOperand::MOLoad; |
| 18391 | return true; |
| 18392 | } |
| 18393 | case Intrinsic::arm_neon_vld1x2: |
| 18394 | case Intrinsic::arm_neon_vld1x3: |
| 18395 | case Intrinsic::arm_neon_vld1x4: { |
| 18396 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 18397 | // Conservatively set memVT to the entire set of vectors loaded. |
| 18398 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
| 18399 | uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; |
| 18400 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); |
| 18401 | Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1); |
| 18402 | Info.offset = 0; |
| 18403 | Info.align.reset(); |
| 18404 | // volatile loads with NEON intrinsics not supported |
| 18405 | Info.flags = MachineMemOperand::MOLoad; |
| 18406 | return true; |
| 18407 | } |
| 18408 | case Intrinsic::arm_neon_vst1: |
| 18409 | case Intrinsic::arm_neon_vst2: |
| 18410 | case Intrinsic::arm_neon_vst3: |
| 18411 | case Intrinsic::arm_neon_vst4: |
| 18412 | case Intrinsic::arm_neon_vst2lane: |
| 18413 | case Intrinsic::arm_neon_vst3lane: |
| 18414 | case Intrinsic::arm_neon_vst4lane: { |
| 18415 | Info.opc = ISD::INTRINSIC_VOID; |
| 18416 | // Conservatively set memVT to the entire set of vectors stored. |
| 18417 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
| 18418 | unsigned NumElts = 0; |
| 18419 | for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { |
| 18420 | Type *ArgTy = I.getArgOperand(ArgI)->getType(); |
| 18421 | if (!ArgTy->isVectorTy()) |
| 18422 | break; |
| 18423 | NumElts += DL.getTypeSizeInBits(ArgTy) / 64; |
| 18424 | } |
| 18425 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); |
| 18426 | Info.ptrVal = I.getArgOperand(0); |
| 18427 | Info.offset = 0; |
| 18428 | Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); |
| 18429 | Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue(); |
| 18430 | // volatile stores with NEON intrinsics not supported |
| 18431 | Info.flags = MachineMemOperand::MOStore; |
| 18432 | return true; |
| 18433 | } |
| 18434 | case Intrinsic::arm_neon_vst1x2: |
| 18435 | case Intrinsic::arm_neon_vst1x3: |
| 18436 | case Intrinsic::arm_neon_vst1x4: { |
| 18437 | Info.opc = ISD::INTRINSIC_VOID; |
| 18438 | // Conservatively set memVT to the entire set of vectors stored. |
| 18439 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
| 18440 | unsigned NumElts = 0; |
| 18441 | for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { |
| 18442 | Type *ArgTy = I.getArgOperand(ArgI)->getType(); |
| 18443 | if (!ArgTy->isVectorTy()) |
| 18444 | break; |
| 18445 | NumElts += DL.getTypeSizeInBits(ArgTy) / 64; |
| 18446 | } |
| 18447 | Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); |
| 18448 | Info.ptrVal = I.getArgOperand(0); |
| 18449 | Info.offset = 0; |
| 18450 | Info.align.reset(); |
| 18451 | // volatile stores with NEON intrinsics not supported |
| 18452 | Info.flags = MachineMemOperand::MOStore; |
| 18453 | return true; |
| 18454 | } |
| 18455 | case Intrinsic::arm_mve_vld2q: |
| 18456 | case Intrinsic::arm_mve_vld4q: { |
| 18457 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 18458 | // Conservatively set memVT to the entire set of vectors loaded. |
| 18459 | Type *VecTy = cast<StructType>(I.getType())->getElementType(1); |
| 18460 | unsigned Factor = Intrinsic == Intrinsic::arm_mve_vld2q ? 2 : 4; |
| 18461 | Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2); |
| 18462 | Info.ptrVal = I.getArgOperand(0); |
| 18463 | Info.offset = 0; |
| 18464 | Info.align = Align(VecTy->getScalarSizeInBits() / 8); |
| 18465 | // volatile loads with MVE intrinsics not supported |
| 18466 | Info.flags = MachineMemOperand::MOLoad; |
| 18467 | return true; |
| 18468 | } |
| 18469 | case Intrinsic::arm_mve_vst2q: |
| 18470 | case Intrinsic::arm_mve_vst4q: { |
| 18471 | Info.opc = ISD::INTRINSIC_VOID; |
| 18472 | // Conservatively set memVT to the entire set of vectors stored. |
| 18473 | Type *VecTy = I.getArgOperand(1)->getType(); |
| 18474 | unsigned Factor = Intrinsic == Intrinsic::arm_mve_vst2q ? 2 : 4; |
| 18475 | Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2); |
| 18476 | Info.ptrVal = I.getArgOperand(0); |
| 18477 | Info.offset = 0; |
| 18478 | Info.align = Align(VecTy->getScalarSizeInBits() / 8); |
| 18479 | // volatile stores with MVE intrinsics not supported |
| 18480 | Info.flags = MachineMemOperand::MOStore; |
| 18481 | return true; |
| 18482 | } |
| 18483 | case Intrinsic::arm_ldaex: |
| 18484 | case Intrinsic::arm_ldrex: { |
| 18485 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
| 18486 | PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); |
| 18487 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 18488 | Info.memVT = MVT::getVT(PtrTy->getElementType()); |
| 18489 | Info.ptrVal = I.getArgOperand(0); |
| 18490 | Info.offset = 0; |
| 18491 | Info.align = DL.getABITypeAlign(PtrTy->getElementType()); |
| 18492 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
| 18493 | return true; |
| 18494 | } |
| 18495 | case Intrinsic::arm_stlex: |
| 18496 | case Intrinsic::arm_strex: { |
| 18497 | auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); |
| 18498 | PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType()); |
| 18499 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 18500 | Info.memVT = MVT::getVT(PtrTy->getElementType()); |
| 18501 | Info.ptrVal = I.getArgOperand(1); |
| 18502 | Info.offset = 0; |
| 18503 | Info.align = DL.getABITypeAlign(PtrTy->getElementType()); |
| 18504 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
| 18505 | return true; |
| 18506 | } |
| 18507 | case Intrinsic::arm_stlexd: |
| 18508 | case Intrinsic::arm_strexd: |
| 18509 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 18510 | Info.memVT = MVT::i64; |
| 18511 | Info.ptrVal = I.getArgOperand(2); |
| 18512 | Info.offset = 0; |
| 18513 | Info.align = Align(8); |
| 18514 | Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; |
| 18515 | return true; |
| 18516 | |
| 18517 | case Intrinsic::arm_ldaexd: |
| 18518 | case Intrinsic::arm_ldrexd: |
| 18519 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
| 18520 | Info.memVT = MVT::i64; |
| 18521 | Info.ptrVal = I.getArgOperand(0); |
| 18522 | Info.offset = 0; |
| 18523 | Info.align = Align(8); |
| 18524 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; |
| 18525 | return true; |
| 18526 | |
| 18527 | default: |
| 18528 | break; |
| 18529 | } |
| 18530 | |
| 18531 | return false; |
| 18532 | } |
| 18533 | |
| 18534 | /// Returns true if it is beneficial to convert a load of a constant |
| 18535 | /// to just the constant itself. |
| 18536 | bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, |
| 18537 | Type *Ty) const { |
| 18538 | assert(Ty->isIntegerTy()); |
| 18539 | |
| 18540 | unsigned Bits = Ty->getPrimitiveSizeInBits(); |
| 18541 | if (Bits == 0 || Bits > 32) |
| 18542 | return false; |
| 18543 | return true; |
| 18544 | } |
| 18545 | |
bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                unsigned Index) const {
| 18548 | if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) |
| 18549 | return false; |
| 18550 | |
| 18551 | return (Index == 0 || Index == ResVT.getVectorNumElements()); |
| 18552 | } |
| 18553 | |
| 18554 | Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder, |
| 18555 | ARM_MB::MemBOpt Domain) const { |
| 18556 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
| 18557 | |
| 18558 | // First, if the target has no DMB, see what fallback we can use. |
| 18559 | if (!Subtarget->hasDataBarrier()) { |
    // Some ARMv6 CPUs can support data barriers with an mcr instruction.
| 18561 | // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get |
| 18562 | // here. |
| 18563 | if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) { |
| 18564 | Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr); |
| 18565 | Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0), |
| 18566 | Builder.getInt32(0), Builder.getInt32(7), |
| 18567 | Builder.getInt32(10), Builder.getInt32(5)}; |
| 18568 | return Builder.CreateCall(MCR, args); |
| 18569 | } else { |
| 18570 | // Instead of using barriers, atomic accesses on these subtargets use |
| 18571 | // libcalls. |
| 18572 | llvm_unreachable("makeDMB on a target so old that it has no barriers" ); |
| 18573 | } |
| 18574 | } else { |
| 18575 | Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb); |
| 18576 | // Only a full system barrier exists in the M-class architectures. |
| 18577 | Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain; |
| 18578 | Constant *CDomain = Builder.getInt32(Domain); |
| 18579 | return Builder.CreateCall(DMB, CDomain); |
| 18580 | } |
| 18581 | } |
| 18582 | |
| 18583 | // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html |
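// Under that mapping, a seq_cst store is roughly "dmb ish; str; dmb ish":
// the leading barrier is emitted here and the trailing one by
// emitTrailingFence below.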
| 18584 | Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder, |
| 18585 | Instruction *Inst, |
| 18586 | AtomicOrdering Ord) const { |
| 18587 | switch (Ord) { |
| 18588 | case AtomicOrdering::NotAtomic: |
| 18589 | case AtomicOrdering::Unordered: |
| 18590 | llvm_unreachable("Invalid fence: unordered/non-atomic" ); |
| 18591 | case AtomicOrdering::Monotonic: |
| 18592 | case AtomicOrdering::Acquire: |
| 18593 | return nullptr; // Nothing to do |
| 18594 | case AtomicOrdering::SequentiallyConsistent: |
| 18595 | if (!Inst->hasAtomicStore()) |
| 18596 | return nullptr; // Nothing to do |
| 18597 | LLVM_FALLTHROUGH; |
| 18598 | case AtomicOrdering::Release: |
| 18599 | case AtomicOrdering::AcquireRelease: |
| 18600 | if (Subtarget->preferISHSTBarriers()) |
| 18601 | return makeDMB(Builder, ARM_MB::ISHST); |
| 18602 | // FIXME: add a comment with a link to documentation justifying this. |
| 18603 | else |
| 18604 | return makeDMB(Builder, ARM_MB::ISH); |
| 18605 | } |
| 18606 | llvm_unreachable("Unknown fence ordering in emitLeadingFence" ); |
| 18607 | } |
| 18608 | |
| 18609 | Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder, |
| 18610 | Instruction *Inst, |
| 18611 | AtomicOrdering Ord) const { |
| 18612 | switch (Ord) { |
| 18613 | case AtomicOrdering::NotAtomic: |
| 18614 | case AtomicOrdering::Unordered: |
| 18615 | llvm_unreachable("Invalid fence: unordered/not-atomic" ); |
| 18616 | case AtomicOrdering::Monotonic: |
| 18617 | case AtomicOrdering::Release: |
| 18618 | return nullptr; // Nothing to do |
| 18619 | case AtomicOrdering::Acquire: |
| 18620 | case AtomicOrdering::AcquireRelease: |
| 18621 | case AtomicOrdering::SequentiallyConsistent: |
| 18622 | return makeDMB(Builder, ARM_MB::ISH); |
| 18623 | } |
| 18624 | llvm_unreachable("Unknown fence ordering in emitTrailingFence" ); |
| 18625 | } |
| 18626 | |
| 18627 | // Loads and stores less than 64-bits are already atomic; ones above that |
| 18628 | // are doomed anyway, so defer to the default libcall and blame the OS when |
| 18629 | // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit |
| 18630 | // anything for those. |
| 18631 | bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { |
| 18632 | unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); |
| 18633 | return (Size == 64) && !Subtarget->isMClass(); |
| 18634 | } |
| 18635 | |
| 18636 | // Loads and stores less than 64-bits are already atomic; ones above that |
| 18637 | // are doomed anyway, so defer to the default libcall and blame the OS when |
| 18638 | // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit |
| 18639 | // anything for those. |
| 18640 | // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that |
| 18641 | // guarantee, see DDI0406C ARM architecture reference manual, |
| 18642 | // sections A8.8.72-74 LDRD) |
| 18643 | TargetLowering::AtomicExpansionKind |
| 18644 | ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { |
| 18645 | unsigned Size = LI->getType()->getPrimitiveSizeInBits(); |
| 18646 | return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly |
| 18647 | : AtomicExpansionKind::None; |
| 18648 | } |
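
// With AtomicExpansionKind::LLOnly, a 64-bit "load atomic" on an A-class
// core is expanded to a single load-linked with no matching store
// conditional, roughly (illustrative IR):
//
//   %lohi = call { i32, i32 } @llvm.arm.ldrexd(i8* %addr)
//
// ldrexd reads the doubleword single-copy atomically, which is all a plain
// atomic load needs.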
| 18649 | |
| 18650 | // For the real atomic operations, we have ldrex/strex up to 32 bits, |
| 18651 | // and up to 64 bits on the non-M profiles |
| 18652 | TargetLowering::AtomicExpansionKind |
| 18653 | ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { |
| 18654 | if (AI->isFloatingPointOperation()) |
| 18655 | return AtomicExpansionKind::CmpXChg; |
| 18656 | |
| 18657 | unsigned Size = AI->getType()->getPrimitiveSizeInBits(); |
| 18658 | bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps(); |
| 18659 | return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW) |
| 18660 | ? AtomicExpansionKind::LLSC |
| 18661 | : AtomicExpansionKind::None; |
| 18662 | } |
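
// For example, with the LLSC expansion an "atomicrmw add i32" becomes a
// retry loop built from the emitLoadLinked/emitStoreConditional hooks
// further down, roughly (illustrative IR):
//
// loop:
//   %old = call i32 @llvm.arm.ldrex.p0i32(i32* %ptr)
//   %new = add i32 %old, %val
//   %failed = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %ptr)
//   %retry = icmp ne i32 %failed, 0
//   br i1 %retry, label %loop, label %done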
| 18663 | |
| 18664 | // Similar to shouldExpandAtomicRMWInIR, ldrex/strex can be used up to 32 |
| 18665 | // bits, and up to 64 bits on the non-M profiles. |
| 18666 | TargetLowering::AtomicExpansionKind |
| 18667 | ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const { |
| 18668 | // At -O0, fast-regalloc cannot cope with the live vregs necessary to |
| 18669 | // implement cmpxchg without spilling. If the address being exchanged is also |
| 18670 | // on the stack and close enough to the spill slot, this can lead to a |
| 18671 | // situation where the monitor always gets cleared and the atomic operation |
| 18672 | // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. |
| 18673 | unsigned Size = AI->getOperand(1)->getType()->getPrimitiveSizeInBits(); |
| 18674 | bool HasAtomicCmpXchg = |
| 18675 | !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps(); |
| 18676 | if (getTargetMachine().getOptLevel() != 0 && HasAtomicCmpXchg && |
| 18677 | Size <= (Subtarget->isMClass() ? 32U : 64U)) |
| 18678 | return AtomicExpansionKind::LLSC; |
| 18679 | return AtomicExpansionKind::None; |
| 18680 | } |
| 18681 | |
| 18682 | bool ARMTargetLowering::shouldInsertFencesForAtomic( |
| 18683 | const Instruction *I) const { |
| 18684 | return InsertFencesForAtomic; |
| 18685 | } |
| 18686 | |
| 18687 | // This has so far only been implemented for MachO. |
| 18688 | bool ARMTargetLowering::useLoadStackGuardNode() const { |
| 18689 | return Subtarget->isTargetMachO(); |
| 18690 | } |
| 18691 | |
| 18692 | void ARMTargetLowering::insertSSPDeclarations(Module &M) const { |
| 18693 | if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) |
| 18694 | return TargetLowering::insertSSPDeclarations(M); |
| 18695 | |
| 18696 | // MSVC CRT has a global variable holding security cookie. |
M.getOrInsertGlobal("__security_cookie",
| 18698 | Type::getInt8PtrTy(M.getContext())); |
| 18699 | |
| 18700 | // MSVC CRT has a function to validate security cookie. |
| 18701 | FunctionCallee SecurityCheckCookie = M.getOrInsertFunction( |
| 18702 | "__security_check_cookie" , Type::getVoidTy(M.getContext()), |
| 18703 | Type::getInt8PtrTy(M.getContext())); |
| 18704 | if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) |
| 18705 | F->addAttribute(1, Attribute::AttrKind::InReg); |
| 18706 | } |
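
// The declarations inserted above correspond roughly to this IR (a sketch,
// modulo exact pointer types):
//
//   @__security_cookie = external global i8*
//   declare void @__security_check_cookie(i8* inreg)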
| 18707 | |
| 18708 | Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const { |
| 18709 | // MSVC CRT has a global variable holding security cookie. |
| 18710 | if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) |
return M.getGlobalVariable("__security_cookie");
| 18712 | return TargetLowering::getSDagStackGuard(M); |
| 18713 | } |
| 18714 | |
| 18715 | Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const { |
| 18716 | // MSVC CRT has a function to validate security cookie. |
| 18717 | if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment()) |
return M.getFunction("__security_check_cookie");
| 18719 | return TargetLowering::getSSPStackGuardCheck(M); |
| 18720 | } |
| 18721 | |
| 18722 | bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx, |
| 18723 | unsigned &Cost) const { |
| 18724 | // If we do not have NEON, vector types are not natively supported. |
| 18725 | if (!Subtarget->hasNEON()) |
| 18726 | return false; |
| 18727 | |
// Floating point values and vector values map to the same register file.
// Therefore, although we could do a store extract of a vector type, it is
// better to leave those as floats, since we have more freedom in the
// addressing modes available for them.
| 18732 | if (VectorTy->isFPOrFPVectorTy()) |
| 18733 | return false; |
| 18734 | |
| 18735 | // If the index is unknown at compile time, this is very expensive to lower |
| 18736 | // and it is not possible to combine the store with the extract. |
| 18737 | if (!isa<ConstantInt>(Idx)) |
| 18738 | return false; |
| 18739 | |
assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
| 18741 | unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedSize(); |
| 18742 | // We can do a store + vector extract on any vector that fits perfectly in a D |
| 18743 | // or Q register. |
| 18744 | if (BitWidth == 64 || BitWidth == 128) { |
| 18745 | Cost = 0; |
| 18746 | return true; |
| 18747 | } |
| 18748 | return false; |
| 18749 | } |
| 18750 | |
| 18751 | bool ARMTargetLowering::isCheapToSpeculateCttz() const { |
| 18752 | return Subtarget->hasV6T2Ops(); |
| 18753 | } |
| 18754 | |
| 18755 | bool ARMTargetLowering::isCheapToSpeculateCtlz() const { |
| 18756 | return Subtarget->hasV6T2Ops(); |
| 18757 | } |
| 18758 | |
| 18759 | bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const { |
| 18760 | return !Subtarget->hasMinSize() || Subtarget->isTargetWindows(); |
| 18761 | } |
| 18762 | |
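/// Emit a load-linked for the given address: ldrex, or ldaex when an acquire
/// ordering is requested. For 64-bit accesses the {i32, i32} result of the
/// ldrexd intrinsic is recombined into an i64; e.g. (illustrative,
/// little-endian):
///   %lohi = call { i32, i32 } @llvm.arm.ldaexd(i8* %addr)
///   %lo = extractvalue { i32, i32 } %lohi, 0
///   %hi = extractvalue { i32, i32 } %lohi, 1
///   ... then zext both halves and combine: or (zext %lo), (shl (zext %hi), 32)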
| 18763 | Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr, |
| 18764 | AtomicOrdering Ord) const { |
| 18765 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
| 18766 | Type *ValTy = cast<PointerType>(Addr->getType())->getElementType(); |
| 18767 | bool IsAcquire = isAcquireOrStronger(Ord); |
| 18768 | |
| 18769 | // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd |
| 18770 | // intrinsic must return {i32, i32} and we have to recombine them into a |
| 18771 | // single i64 here. |
| 18772 | if (ValTy->getPrimitiveSizeInBits() == 64) { |
| 18773 | Intrinsic::ID Int = |
| 18774 | IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd; |
| 18775 | Function *Ldrex = Intrinsic::getDeclaration(M, Int); |
| 18776 | |
| 18777 | Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); |
Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
| 18779 | |
Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
if (!Subtarget->isLittle())
  std::swap(Lo, Hi);
Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
return Builder.CreateOr(
    Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
| 18788 | } |
| 18789 | |
| 18790 | Type *Tys[] = { Addr->getType() }; |
| 18791 | Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex; |
| 18792 | Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys); |
| 18793 | |
| 18794 | return Builder.CreateTruncOrBitCast( |
| 18795 | Builder.CreateCall(Ldrex, Addr), |
| 18796 | cast<PointerType>(Addr->getType())->getElementType()); |
| 18797 | } |
| 18798 | |
| 18799 | void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance( |
| 18800 | IRBuilder<> &Builder) const { |
| 18801 | if (!Subtarget->hasV7Ops()) |
| 18802 | return; |
| 18803 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
| 18804 | Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex)); |
| 18805 | } |
| 18806 | |
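/// Emit a store-conditional for the given value and address: strex, or stlex
/// when a release ordering is requested. A 64-bit value is first split into
/// two i32 halves for strexd/stlexd; e.g. (illustrative, little-endian):
///   %lo = trunc i64 %val to i32
///   %hi = trunc i64 (lshr i64 %val, 32) to i32
///   %failed = call i32 @llvm.arm.stlexd(i32 %lo, i32 %hi, i8* %addr)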
| 18807 | Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val, |
| 18808 | Value *Addr, |
| 18809 | AtomicOrdering Ord) const { |
| 18810 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); |
| 18811 | bool IsRelease = isReleaseOrStronger(Ord); |
| 18812 | |
| 18813 | // Since the intrinsics must have legal type, the i64 intrinsics take two |
| 18814 | // parameters: "i32, i32". We must marshal Val into the appropriate form |
| 18815 | // before the call. |
| 18816 | if (Val->getType()->getPrimitiveSizeInBits() == 64) { |
| 18817 | Intrinsic::ID Int = |
| 18818 | IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd; |
| 18819 | Function *Strex = Intrinsic::getDeclaration(M, Int); |
| 18820 | Type *Int32Ty = Type::getInt32Ty(M->getContext()); |
| 18821 | |
Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
| 18824 | if (!Subtarget->isLittle()) |
| 18825 | std::swap(Lo, Hi); |
| 18826 | Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); |
| 18827 | return Builder.CreateCall(Strex, {Lo, Hi, Addr}); |
| 18828 | } |
| 18829 | |
| 18830 | Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex; |
| 18831 | Type *Tys[] = { Addr->getType() }; |
| 18832 | Function *Strex = Intrinsic::getDeclaration(M, Int, Tys); |
| 18833 | |
| 18834 | return Builder.CreateCall( |
| 18835 | Strex, {Builder.CreateZExtOrBitCast( |
| 18836 | Val, Strex->getFunctionType()->getParamType(0)), |
| 18837 | Addr}); |
| 18838 | } |
| 18839 | |
| 18841 | bool ARMTargetLowering::alignLoopsWithOptSize() const { |
| 18842 | return Subtarget->isMClass(); |
| 18843 | } |
| 18844 | |
| 18845 | /// A helper function for determining the number of interleaved accesses we |
| 18846 | /// will generate when lowering accesses of the given type. |
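/// E.g. a 256-bit <8 x i32> is split into two 128-bit accesses, while a
/// 64-bit <2 x i32> needs only one ((Size + 127) / 128).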
| 18847 | unsigned |
| 18848 | ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy, |
| 18849 | const DataLayout &DL) const { |
| 18850 | return (DL.getTypeSizeInBits(VecTy) + 127) / 128; |
| 18851 | } |
| 18852 | |
| 18853 | bool ARMTargetLowering::isLegalInterleavedAccessType( |
| 18854 | unsigned Factor, FixedVectorType *VecTy, const DataLayout &DL) const { |
| 18855 | |
| 18856 | unsigned VecSize = DL.getTypeSizeInBits(VecTy); |
| 18857 | unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType()); |
| 18858 | |
| 18859 | if (!Subtarget->hasNEON() && !Subtarget->hasMVEIntegerOps()) |
| 18860 | return false; |
| 18861 | |
| 18862 | // Ensure the vector doesn't have f16 elements. Even though we could do an |
| 18863 | // i16 vldN, we can't hold the f16 vectors and will end up converting via |
| 18864 | // f32. |
| 18865 | if (Subtarget->hasNEON() && VecTy->getElementType()->isHalfTy()) |
| 18866 | return false; |
| 18867 | if (Subtarget->hasMVEIntegerOps() && Factor == 3) |
| 18868 | return false; |
| 18869 | |
| 18870 | // Ensure the number of vector elements is greater than 1. |
| 18871 | if (VecTy->getNumElements() < 2) |
| 18872 | return false; |
| 18873 | |
| 18874 | // Ensure the element type is legal. |
| 18875 | if (ElSize != 8 && ElSize != 16 && ElSize != 32) |
| 18876 | return false; |
| 18877 | |
| 18878 | // Ensure the total vector size is 64 or a multiple of 128. Types larger than |
| 18879 | // 128 will be split into multiple interleaved accesses. |
| 18880 | if (Subtarget->hasNEON() && VecSize == 64) |
| 18881 | return true; |
| 18882 | return VecSize % 128 == 0; |
| 18883 | } |
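
// E.g. a <4 x i32> with Factor 2 is legal on both NEON and MVE (128 bits,
// 32-bit elements), a <2 x i32> is legal only on NEON (64-bit total size),
// and a <8 x half> is rejected on NEON because of its f16 elements.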
| 18884 | |
| 18885 | unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const { |
| 18886 | if (Subtarget->hasNEON()) |
| 18887 | return 4; |
| 18888 | if (Subtarget->hasMVEIntegerOps()) |
| 18889 | return MVEMaxSupportedInterleaveFactor; |
| 18890 | return TargetLoweringBase::getMaxSupportedInterleaveFactor(); |
| 18891 | } |
| 18892 | |
| 18893 | /// Lower an interleaved load into a vldN intrinsic. |
| 18894 | /// |
| 18895 | /// E.g. Lower an interleaved load (Factor = 2): |
| 18896 | /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4 |
| 18897 | /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements |
| 18898 | /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements |
| 18899 | /// |
| 18900 | /// Into: |
/// %vld2 = call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2(%ptr, 4)
/// %vec0 = extractvalue { <4 x i32>, <4 x i32> } %vld2, 0
/// %vec1 = extractvalue { <4 x i32>, <4 x i32> } %vld2, 1
| 18904 | bool ARMTargetLowering::lowerInterleavedLoad( |
| 18905 | LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles, |
| 18906 | ArrayRef<unsigned> Indices, unsigned Factor) const { |
assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
       "Invalid interleave factor");
assert(!Shuffles.empty() && "Empty shufflevector input");
assert(Shuffles.size() == Indices.size() &&
       "Unmatched number of shufflevectors and indices");
| 18912 | |
| 18913 | auto *VecTy = cast<FixedVectorType>(Shuffles[0]->getType()); |
| 18914 | Type *EltTy = VecTy->getElementType(); |
| 18915 | |
| 18916 | const DataLayout &DL = LI->getModule()->getDataLayout(); |
| 18917 | |
// Skip if we do not have NEON or MVE, and skip illegal vector types. We can
// "legalize" wide vector types into multiple interleaved accesses as long as
// the vector size is divisible by 128 bits.
| 18921 | if (!isLegalInterleavedAccessType(Factor, VecTy, DL)) |
| 18922 | return false; |
| 18923 | |
| 18924 | unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL); |
| 18925 | |
// A pointer vector cannot be the return type of the ldN intrinsics. Need to
// load integer vectors first and then convert to pointer vectors.
| 18928 | if (EltTy->isPointerTy()) |
| 18929 | VecTy = FixedVectorType::get(DL.getIntPtrType(EltTy), VecTy); |
| 18930 | |
| 18931 | IRBuilder<> Builder(LI); |
| 18932 | |
| 18933 | // The base address of the load. |
| 18934 | Value *BaseAddr = LI->getPointerOperand(); |
| 18935 | |
| 18936 | if (NumLoads > 1) { |
| 18937 | // If we're going to generate more than one load, reset the sub-vector type |
| 18938 | // to something legal. |
| 18939 | VecTy = FixedVectorType::get(VecTy->getElementType(), |
| 18940 | VecTy->getNumElements() / NumLoads); |
| 18941 | |
| 18942 | // We will compute the pointer operand of each load from the original base |
| 18943 | // address using GEPs. Cast the base address to a pointer to the scalar |
| 18944 | // element type. |
| 18945 | BaseAddr = Builder.CreateBitCast( |
| 18946 | BaseAddr, |
| 18947 | VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace())); |
| 18948 | } |
| 18949 | |
assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");
| 18951 | |
| 18952 | auto createLoadIntrinsic = [&](Value *BaseAddr) { |
| 18953 | if (Subtarget->hasNEON()) { |
| 18954 | Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace()); |
| 18955 | Type *Tys[] = {VecTy, Int8Ptr}; |
| 18956 | static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2, |
| 18957 | Intrinsic::arm_neon_vld3, |
| 18958 | Intrinsic::arm_neon_vld4}; |
| 18959 | Function *VldnFunc = |
| 18960 | Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys); |
| 18961 | |
| 18962 | SmallVector<Value *, 2> Ops; |
| 18963 | Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr)); |
| 18964 | Ops.push_back(Builder.getInt32(LI->getAlignment())); |
| 18965 | |
return Builder.CreateCall(VldnFunc, Ops, "vldN");
| 18967 | } else { |
assert((Factor == 2 || Factor == 4) &&
       "expected interleave factor of 2 or 4 for MVE");
| 18970 | Intrinsic::ID LoadInts = |
| 18971 | Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q; |
| 18972 | Type *VecEltTy = |
| 18973 | VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()); |
| 18974 | Type *Tys[] = {VecTy, VecEltTy}; |
| 18975 | Function *VldnFunc = |
| 18976 | Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys); |
| 18977 | |
| 18978 | SmallVector<Value *, 2> Ops; |
| 18979 | Ops.push_back(Builder.CreateBitCast(BaseAddr, VecEltTy)); |
return Builder.CreateCall(VldnFunc, Ops, "vldN");
| 18981 | } |
| 18982 | }; |
| 18983 | |
| 18984 | // Holds sub-vectors extracted from the load intrinsic return values. The |
| 18985 | // sub-vectors are associated with the shufflevector instructions they will |
| 18986 | // replace. |
| 18987 | DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs; |
| 18988 | |
| 18989 | for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) { |
// If we're generating more than one load, compute the base address of
// subsequent loads as an offset from the previous one.
| 18992 | if (LoadCount > 0) |
| 18993 | BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr, |
| 18994 | VecTy->getNumElements() * Factor); |
| 18995 | |
| 18996 | CallInst *VldN = createLoadIntrinsic(BaseAddr); |
| 18997 | |
| 18998 | // Replace uses of each shufflevector with the corresponding vector loaded |
| 18999 | // by ldN. |
| 19000 | for (unsigned i = 0; i < Shuffles.size(); i++) { |
| 19001 | ShuffleVectorInst *SV = Shuffles[i]; |
| 19002 | unsigned Index = Indices[i]; |
| 19003 | |
| 19004 | Value *SubVec = Builder.CreateExtractValue(VldN, Index); |
| 19005 | |
| 19006 | // Convert the integer vector to pointer vector if the element is pointer. |
| 19007 | if (EltTy->isPointerTy()) |
| 19008 | SubVec = Builder.CreateIntToPtr( |
| 19009 | SubVec, |
| 19010 | FixedVectorType::get(SV->getType()->getElementType(), VecTy)); |
| 19011 | |
| 19012 | SubVecs[SV].push_back(SubVec); |
| 19013 | } |
| 19014 | } |
| 19015 | |
| 19016 | // Replace uses of the shufflevector instructions with the sub-vectors |
| 19017 | // returned by the load intrinsic. If a shufflevector instruction is |
| 19018 | // associated with more than one sub-vector, those sub-vectors will be |
| 19019 | // concatenated into a single wide vector. |
| 19020 | for (ShuffleVectorInst *SVI : Shuffles) { |
| 19021 | auto &SubVec = SubVecs[SVI]; |
| 19022 | auto *WideVec = |
| 19023 | SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0]; |
| 19024 | SVI->replaceAllUsesWith(WideVec); |
| 19025 | } |
| 19026 | |
| 19027 | return true; |
| 19028 | } |
| 19029 | |
| 19030 | /// Lower an interleaved store into a vstN intrinsic. |
| 19031 | /// |
| 19032 | /// E.g. Lower an interleaved store (Factor = 3): |
| 19033 | /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, |
| 19034 | /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> |
| 19035 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4 |
| 19036 | /// |
| 19037 | /// Into: |
| 19038 | /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> |
| 19039 | /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> |
| 19040 | /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> |
| 19041 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) |
| 19042 | /// |
| 19043 | /// Note that the new shufflevectors will be removed and we'll only generate one |
| 19044 | /// vst3 instruction in CodeGen. |
| 19045 | /// |
| 19046 | /// Example for a more general valid mask (Factor 3). Lower: |
| 19047 | /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1, |
| 19048 | /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19> |
| 19049 | /// store <12 x i32> %i.vec, <12 x i32>* %ptr |
| 19050 | /// |
| 19051 | /// Into: |
| 19052 | /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7> |
| 19053 | /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35> |
| 19054 | /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19> |
| 19055 | /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) |
| 19056 | bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI, |
| 19057 | ShuffleVectorInst *SVI, |
| 19058 | unsigned Factor) const { |
assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
       "Invalid interleave factor");
| 19061 | |
| 19062 | auto *VecTy = cast<FixedVectorType>(SVI->getType()); |
assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
| 19064 | |
| 19065 | unsigned LaneLen = VecTy->getNumElements() / Factor; |
| 19066 | Type *EltTy = VecTy->getElementType(); |
| 19067 | auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen); |
| 19068 | |
| 19069 | const DataLayout &DL = SI->getModule()->getDataLayout(); |
| 19070 | |
// Skip if we do not have NEON or MVE, and skip illegal vector types. We can
// "legalize" wide vector types into multiple interleaved accesses as long as
// the vector size is divisible by 128 bits.
| 19074 | if (!isLegalInterleavedAccessType(Factor, SubVecTy, DL)) |
| 19075 | return false; |
| 19076 | |
| 19077 | unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL); |
| 19078 | |
| 19079 | Value *Op0 = SVI->getOperand(0); |
| 19080 | Value *Op1 = SVI->getOperand(1); |
| 19081 | IRBuilder<> Builder(SI); |
| 19082 | |
| 19083 | // StN intrinsics don't support pointer vectors as arguments. Convert pointer |
| 19084 | // vectors to integer vectors. |
| 19085 | if (EltTy->isPointerTy()) { |
| 19086 | Type *IntTy = DL.getIntPtrType(EltTy); |
| 19087 | |
| 19088 | // Convert to the corresponding integer vector. |
| 19089 | auto *IntVecTy = |
| 19090 | FixedVectorType::get(IntTy, cast<FixedVectorType>(Op0->getType())); |
| 19091 | Op0 = Builder.CreatePtrToInt(Op0, IntVecTy); |
| 19092 | Op1 = Builder.CreatePtrToInt(Op1, IntVecTy); |
| 19093 | |
| 19094 | SubVecTy = FixedVectorType::get(IntTy, LaneLen); |
| 19095 | } |
| 19096 | |
| 19097 | // The base address of the store. |
| 19098 | Value *BaseAddr = SI->getPointerOperand(); |
| 19099 | |
| 19100 | if (NumStores > 1) { |
| 19101 | // If we're going to generate more than one store, reset the lane length |
| 19102 | // and sub-vector type to something legal. |
| 19103 | LaneLen /= NumStores; |
| 19104 | SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen); |
| 19105 | |
| 19106 | // We will compute the pointer operand of each store from the original base |
| 19107 | // address using GEPs. Cast the base address to a pointer to the scalar |
| 19108 | // element type. |
| 19109 | BaseAddr = Builder.CreateBitCast( |
| 19110 | BaseAddr, |
| 19111 | SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace())); |
| 19112 | } |
| 19113 | |
assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");
| 19115 | |
| 19116 | auto Mask = SVI->getShuffleMask(); |
| 19117 | |
| 19118 | auto createStoreIntrinsic = [&](Value *BaseAddr, |
| 19119 | SmallVectorImpl<Value *> &Shuffles) { |
| 19120 | if (Subtarget->hasNEON()) { |
| 19121 | static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2, |
| 19122 | Intrinsic::arm_neon_vst3, |
| 19123 | Intrinsic::arm_neon_vst4}; |
| 19124 | Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace()); |
| 19125 | Type *Tys[] = {Int8Ptr, SubVecTy}; |
| 19126 | |
| 19127 | Function *VstNFunc = Intrinsic::getDeclaration( |
| 19128 | SI->getModule(), StoreInts[Factor - 2], Tys); |
| 19129 | |
| 19130 | SmallVector<Value *, 6> Ops; |
| 19131 | Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr)); |
| 19132 | append_range(Ops, Shuffles); |
| 19133 | Ops.push_back(Builder.getInt32(SI->getAlignment())); |
| 19134 | Builder.CreateCall(VstNFunc, Ops); |
| 19135 | } else { |
assert((Factor == 2 || Factor == 4) &&
       "expected interleave factor of 2 or 4 for MVE");
| 19138 | Intrinsic::ID StoreInts = |
| 19139 | Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q; |
| 19140 | Type *EltPtrTy = SubVecTy->getElementType()->getPointerTo( |
| 19141 | SI->getPointerAddressSpace()); |
| 19142 | Type *Tys[] = {EltPtrTy, SubVecTy}; |
| 19143 | Function *VstNFunc = |
| 19144 | Intrinsic::getDeclaration(SI->getModule(), StoreInts, Tys); |
| 19145 | |
| 19146 | SmallVector<Value *, 6> Ops; |
| 19147 | Ops.push_back(Builder.CreateBitCast(BaseAddr, EltPtrTy)); |
| 19148 | append_range(Ops, Shuffles); |
| 19149 | for (unsigned F = 0; F < Factor; F++) { |
| 19150 | Ops.push_back(Builder.getInt32(F)); |
| 19151 | Builder.CreateCall(VstNFunc, Ops); |
| 19152 | Ops.pop_back(); |
| 19153 | } |
| 19154 | } |
| 19155 | }; |
| 19156 | |
| 19157 | for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) { |
// If we're generating more than one store, compute the base address of
// subsequent stores as an offset from the previous one.
| 19160 | if (StoreCount > 0) |
| 19161 | BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(), |
| 19162 | BaseAddr, LaneLen * Factor); |
| 19163 | |
| 19164 | SmallVector<Value *, 4> Shuffles; |
| 19165 | |
| 19166 | // Split the shufflevector operands into sub vectors for the new vstN call. |
| 19167 | for (unsigned i = 0; i < Factor; i++) { |
| 19168 | unsigned IdxI = StoreCount * LaneLen * Factor + i; |
| 19169 | if (Mask[IdxI] >= 0) { |
| 19170 | Shuffles.push_back(Builder.CreateShuffleVector( |
| 19171 | Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0))); |
| 19172 | } else { |
| 19173 | unsigned StartMask = 0; |
| 19174 | for (unsigned j = 1; j < LaneLen; j++) { |
| 19175 | unsigned IdxJ = StoreCount * LaneLen * Factor + j; |
| 19176 | if (Mask[IdxJ * Factor + IdxI] >= 0) { |
| 19177 | StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ; |
| 19178 | break; |
| 19179 | } |
| 19180 | } |
// Note: Filling undef gaps with arbitrary elements is OK, since those
// elements were being written anyway (with undefs). If all elements of a
// chunk are undef, StartMask stays 0 and we default to using elements from
// lane 0. StartMask cannot be negative; that is checked in
// isReInterleaveMask.
| 19187 | Shuffles.push_back(Builder.CreateShuffleVector( |
| 19188 | Op0, Op1, createSequentialMask(StartMask, LaneLen, 0))); |
| 19189 | } |
| 19190 | } |
| 19191 | |
| 19192 | createStoreIntrinsic(BaseAddr, Shuffles); |
| 19193 | } |
| 19194 | return true; |
| 19195 | } |
| 19196 | |
| 19197 | enum HABaseType { |
| 19198 | HA_UNKNOWN = 0, |
| 19199 | HA_FLOAT, |
| 19200 | HA_DOUBLE, |
| 19201 | HA_VECT64, |
| 19202 | HA_VECT128 |
| 19203 | }; |
| 19204 | |
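/// Return true if Ty is an AAPCS-VFP homogeneous aggregate: up to four
/// members of a single base type, recursing through structs and arrays.
/// E.g. (illustrative):
///   struct S { float X, Y, Z; };     // HA_FLOAT,  Members == 3
///   struct T { double D[2]; };       // HA_DOUBLE, Members == 2
///   struct U { float F; double D; }; // not homogeneous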
| 19205 | static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, |
| 19206 | uint64_t &Members) { |
| 19207 | if (auto *ST = dyn_cast<StructType>(Ty)) { |
| 19208 | for (unsigned i = 0; i < ST->getNumElements(); ++i) { |
| 19209 | uint64_t SubMembers = 0; |
| 19210 | if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers)) |
| 19211 | return false; |
| 19212 | Members += SubMembers; |
| 19213 | } |
| 19214 | } else if (auto *AT = dyn_cast<ArrayType>(Ty)) { |
| 19215 | uint64_t SubMembers = 0; |
| 19216 | if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers)) |
| 19217 | return false; |
| 19218 | Members += SubMembers * AT->getNumElements(); |
| 19219 | } else if (Ty->isFloatTy()) { |
| 19220 | if (Base != HA_UNKNOWN && Base != HA_FLOAT) |
| 19221 | return false; |
| 19222 | Members = 1; |
| 19223 | Base = HA_FLOAT; |
| 19224 | } else if (Ty->isDoubleTy()) { |
| 19225 | if (Base != HA_UNKNOWN && Base != HA_DOUBLE) |
| 19226 | return false; |
| 19227 | Members = 1; |
| 19228 | Base = HA_DOUBLE; |
| 19229 | } else if (auto *VT = dyn_cast<VectorType>(Ty)) { |
| 19230 | Members = 1; |
| 19231 | switch (Base) { |
| 19232 | case HA_FLOAT: |
| 19233 | case HA_DOUBLE: |
| 19234 | return false; |
| 19235 | case HA_VECT64: |
| 19236 | return VT->getPrimitiveSizeInBits().getFixedSize() == 64; |
| 19237 | case HA_VECT128: |
| 19238 | return VT->getPrimitiveSizeInBits().getFixedSize() == 128; |
| 19239 | case HA_UNKNOWN: |
| 19240 | switch (VT->getPrimitiveSizeInBits().getFixedSize()) { |
| 19241 | case 64: |
| 19242 | Base = HA_VECT64; |
| 19243 | return true; |
| 19244 | case 128: |
| 19245 | Base = HA_VECT128; |
| 19246 | return true; |
| 19247 | default: |
| 19248 | return false; |
| 19249 | } |
| 19250 | } |
| 19251 | } |
| 19252 | |
| 19253 | return (Members > 0 && Members <= 4); |
| 19254 | } |
| 19255 | |
| 19256 | /// Return the correct alignment for the current calling convention. |
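/// Vector arguments get their natural ABI alignment capped at the stack
/// alignment, so e.g. a <4 x i32> (16-byte aligned by type) would be passed
/// with 8-byte alignment under a data layout with an 8-byte stack alignment,
/// as AAPCS typically specifies.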
| 19257 | Align ARMTargetLowering::getABIAlignmentForCallingConv(Type *ArgTy, |
| 19258 | DataLayout DL) const { |
| 19259 | const Align ABITypeAlign = DL.getABITypeAlign(ArgTy); |
| 19260 | if (!ArgTy->isVectorTy()) |
| 19261 | return ABITypeAlign; |
| 19262 | |
| 19263 | // Avoid over-aligning vector parameters. It would require realigning the |
| 19264 | // stack and waste space for no real benefit. |
| 19265 | return std::min(ABITypeAlign, DL.getStackAlignment()); |
| 19266 | } |
| 19267 | |
| 19268 | /// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of |
| 19269 | /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when |
| 19270 | /// passing according to AAPCS rules. |
| 19271 | bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters( |
| 19272 | Type *Ty, CallingConv::ID CallConv, bool isVarArg) const { |
| 19273 | if (getEffectiveCallingConv(CallConv, isVarArg) != |
| 19274 | CallingConv::ARM_AAPCS_VFP) |
| 19275 | return false; |
| 19276 | |
| 19277 | HABaseType Base = HA_UNKNOWN; |
| 19278 | uint64_t Members = 0; |
| 19279 | bool IsHA = isHomogeneousAggregate(Ty, Base, Members); |
LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());
| 19281 | |
| 19282 | bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy(); |
| 19283 | return IsHA || IsIntArray; |
| 19284 | } |
| 19285 | |
| 19286 | Register ARMTargetLowering::getExceptionPointerRegister( |
| 19287 | const Constant *PersonalityFn) const { |
| 19288 | // Platforms which do not use SjLj EH may return values in these registers |
| 19289 | // via the personality function. |
| 19290 | return Subtarget->useSjLjEH() ? Register() : ARM::R0; |
| 19291 | } |
| 19292 | |
| 19293 | Register ARMTargetLowering::getExceptionSelectorRegister( |
| 19294 | const Constant *PersonalityFn) const { |
| 19295 | // Platforms which do not use SjLj EH may return values in these registers |
| 19296 | // via the personality function. |
| 19297 | return Subtarget->useSjLjEH() ? Register() : ARM::R1; |
| 19298 | } |
| 19299 | |
| 19300 | void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { |
| 19301 | // Update IsSplitCSR in ARMFunctionInfo. |
| 19302 | ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>(); |
| 19303 | AFI->setIsSplitCSR(true); |
| 19304 | } |
| 19305 | |
| 19306 | void ARMTargetLowering::insertCopiesSplitCSR( |
| 19307 | MachineBasicBlock *Entry, |
| 19308 | const SmallVectorImpl<MachineBasicBlock *> &Exits) const { |
| 19309 | const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
| 19310 | const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); |
| 19311 | if (!IStart) |
| 19312 | return; |
| 19313 | |
| 19314 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
| 19315 | MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); |
| 19316 | MachineBasicBlock::iterator MBBI = Entry->begin(); |
| 19317 | for (const MCPhysReg *I = IStart; *I; ++I) { |
| 19318 | const TargetRegisterClass *RC = nullptr; |
| 19319 | if (ARM::GPRRegClass.contains(*I)) |
| 19320 | RC = &ARM::GPRRegClass; |
| 19321 | else if (ARM::DPRRegClass.contains(*I)) |
| 19322 | RC = &ARM::DPRRegClass; |
| 19323 | else |
llvm_unreachable("Unexpected register class in CSRsViaCopy!");
| 19325 | |
| 19326 | Register NewVR = MRI->createVirtualRegister(RC); |
| 19327 | // Create copy from CSR to a virtual register. |
| 19328 | // FIXME: this currently does not emit CFI pseudo-instructions, it works |
| 19329 | // fine for CXX_FAST_TLS since the C++-style TLS access functions should be |
| 19330 | // nounwind. If we want to generalize this later, we may need to emit |
| 19331 | // CFI pseudo-instructions. |
assert(Entry->getParent()->getFunction().hasFnAttribute(
           Attribute::NoUnwind) &&
       "Function should be nounwind in insertCopiesSplitCSR!");
| 19335 | Entry->addLiveIn(*I); |
| 19336 | BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) |
| 19337 | .addReg(*I); |
| 19338 | |
| 19339 | // Insert the copy-back instructions right before the terminator. |
| 19340 | for (auto *Exit : Exits) |
| 19341 | BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), |
| 19342 | TII->get(TargetOpcode::COPY), *I) |
| 19343 | .addReg(NewVR); |
| 19344 | } |
| 19345 | } |
| 19346 | |
| 19347 | void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const { |
| 19348 | MF.getFrameInfo().computeMaxCallFrameSize(MF); |
| 19349 | TargetLoweringBase::finalizeLowering(MF); |
| 19350 | } |
| 19351 | |